LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/amdgpu - vce_v3_0.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 418 0.0 %
Date: 2022-12-09 01:23:36 Functions: 0 32 0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2014 Advanced Micro Devices, Inc.
       3             :  * All Rights Reserved.
       4             :  *
       5             :  * Permission is hereby granted, free of charge, to any person obtaining a
       6             :  * copy of this software and associated documentation files (the
       7             :  * "Software"), to deal in the Software without restriction, including
       8             :  * without limitation the rights to use, copy, modify, merge, publish,
       9             :  * distribute, sub license, and/or sell copies of the Software, and to
      10             :  * permit persons to whom the Software is furnished to do so, subject to
      11             :  * the following conditions:
      12             :  *
      13             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      14             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      15             :  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
      16             :  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
      17             :  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
      18             :  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
      19             :  * USE OR OTHER DEALINGS IN THE SOFTWARE.
      20             :  *
      21             :  * The above copyright notice and this permission notice (including the
      22             :  * next paragraph) shall be included in all copies or substantial portions
      23             :  * of the Software.
      24             :  *
      25             :  * Authors: Christian König <christian.koenig@amd.com>
      26             :  */
      27             : 
      28             : #include <linux/firmware.h>
      29             : 
      30             : #include "amdgpu.h"
      31             : #include "amdgpu_vce.h"
      32             : #include "vid.h"
      33             : #include "vce/vce_3_0_d.h"
      34             : #include "vce/vce_3_0_sh_mask.h"
      35             : #include "oss/oss_3_0_d.h"
      36             : #include "oss/oss_3_0_sh_mask.h"
      37             : #include "gca/gfx_8_0_d.h"
      38             : #include "smu/smu_7_1_2_d.h"
      39             : #include "smu/smu_7_1_2_sh_mask.h"
      40             : #include "gca/gfx_8_0_sh_mask.h"
      41             : #include "ivsrcid/ivsrcid_vislands30.h"
      42             : 
      43             : 
      44             : #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT     0x04
      45             : #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK       0x10
      46             : #define GRBM_GFX_INDEX__VCE_ALL_PIPE            0x07
      47             : 
      48             : #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
      49             : #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
      50             : #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
      51             : #define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
      52             : 
      53             : #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02
      54             : 
      55             : #define VCE_V3_0_FW_SIZE        (384 * 1024)
      56             : #define VCE_V3_0_STACK_SIZE     (64 * 1024)
      57             : #define VCE_V3_0_DATA_SIZE      ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
      58             : 
      59             : #define FW_52_8_3       ((52 << 24) | (8 << 16) | (3 << 8))
      60             : 
      61             : #define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
      62             :                                         | GRBM_GFX_INDEX__VCE_ALL_PIPE)
      63             : 
      64             : static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
      65             : static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
      66             : static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
      67             : static int vce_v3_0_wait_for_idle(void *handle);
      68             : static int vce_v3_0_set_clockgating_state(void *handle,
      69             :                                           enum amd_clockgating_state state);
      70             : /**
      71             :  * vce_v3_0_ring_get_rptr - get read pointer
      72             :  *
      73             :  * @ring: amdgpu_ring pointer
      74             :  *
      75             :  * Returns the current hardware read pointer
      76             :  */
      77           0 : static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
      78             : {
      79           0 :         struct amdgpu_device *adev = ring->adev;
      80             :         u32 v;
      81             : 
      82           0 :         mutex_lock(&adev->grbm_idx_mutex);
      83           0 :         if (adev->vce.harvest_config == 0 ||
      84             :                 adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
      85           0 :                 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
      86           0 :         else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
      87           0 :                 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
      88             : 
      89           0 :         if (ring->me == 0)
      90           0 :                 v = RREG32(mmVCE_RB_RPTR);
      91           0 :         else if (ring->me == 1)
      92           0 :                 v = RREG32(mmVCE_RB_RPTR2);
      93             :         else
      94           0 :                 v = RREG32(mmVCE_RB_RPTR3);
      95             : 
      96           0 :         WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
      97           0 :         mutex_unlock(&adev->grbm_idx_mutex);
      98             : 
      99           0 :         return v;
     100             : }
     101             : 
     102             : /**
     103             :  * vce_v3_0_ring_get_wptr - get write pointer
     104             :  *
     105             :  * @ring: amdgpu_ring pointer
     106             :  *
     107             :  * Returns the current hardware write pointer
     108             :  */
     109           0 : static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
     110             : {
     111           0 :         struct amdgpu_device *adev = ring->adev;
     112             :         u32 v;
     113             : 
     114           0 :         mutex_lock(&adev->grbm_idx_mutex);
     115           0 :         if (adev->vce.harvest_config == 0 ||
     116             :                 adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
     117           0 :                 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
     118           0 :         else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
     119           0 :                 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
     120             : 
     121           0 :         if (ring->me == 0)
     122           0 :                 v = RREG32(mmVCE_RB_WPTR);
     123           0 :         else if (ring->me == 1)
     124           0 :                 v = RREG32(mmVCE_RB_WPTR2);
     125             :         else
     126           0 :                 v = RREG32(mmVCE_RB_WPTR3);
     127             : 
     128           0 :         WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
     129           0 :         mutex_unlock(&adev->grbm_idx_mutex);
     130             : 
     131           0 :         return v;
     132             : }
     133             : 
     134             : /**
     135             :  * vce_v3_0_ring_set_wptr - set write pointer
     136             :  *
     137             :  * @ring: amdgpu_ring pointer
     138             :  *
     139             :  * Commits the write pointer to the hardware
     140             :  */
     141           0 : static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
     142             : {
     143           0 :         struct amdgpu_device *adev = ring->adev;
     144             : 
     145           0 :         mutex_lock(&adev->grbm_idx_mutex);
     146           0 :         if (adev->vce.harvest_config == 0 ||
     147             :                 adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
     148           0 :                 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
     149           0 :         else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
     150           0 :                 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
     151             : 
     152           0 :         if (ring->me == 0)
     153           0 :                 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
     154           0 :         else if (ring->me == 1)
     155           0 :                 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
     156             :         else
     157           0 :                 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
     158             : 
     159           0 :         WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
     160           0 :         mutex_unlock(&adev->grbm_idx_mutex);
     161           0 : }
     162             : 
     163           0 : static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
     164             : {
     165           0 :         WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
     166           0 : }
     167             : 
/*
 * vce_v3_0_set_vce_sw_clock_gating - program software-controlled MGCG
 *
 * @adev: amdgpu_device pointer
 * @gated: true to put the clocks into the gated (throttled) state
 *
 * Read-modify-writes a fixed sequence of clock-gating registers.  The
 * override bit is asserted around the whole sequence so the registers
 * remain accessible while gating is being reconfigured.  The specific
 * mask values are hardware-defined magic from the register programming
 * sequence; do not reorder the writes.
 */
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (!gated) {
		/* Ungated: force the dynamic-gating enables on. */
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Force the UENC DMA clocks on while ungated. */
		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		/* Gated: hand clock control back to the firmware. */
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Release the DMA clock force-on bits. */
		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK  |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}
     234             : 
     235           0 : static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
     236             : {
     237             :         int i, j;
     238             : 
     239           0 :         for (i = 0; i < 10; ++i) {
     240           0 :                 for (j = 0; j < 100; ++j) {
     241           0 :                         uint32_t status = RREG32(mmVCE_STATUS);
     242             : 
     243           0 :                         if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
     244             :                                 return 0;
     245           0 :                         mdelay(10);
     246             :                 }
     247             : 
     248           0 :                 DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
     249           0 :                 WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
     250           0 :                 mdelay(10);
     251           0 :                 WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
     252           0 :                 mdelay(10);
     253             :         }
     254             : 
     255             :         return -ETIMEDOUT;
     256             : }
     257             : 
     258             : /**
     259             :  * vce_v3_0_start - start VCE block
     260             :  *
     261             :  * @adev: amdgpu_device pointer
     262             :  *
     263             :  * Setup and start the VCE block
     264             :  */
     265           0 : static int vce_v3_0_start(struct amdgpu_device *adev)
     266             : {
     267             :         struct amdgpu_ring *ring;
     268             :         int idx, r;
     269             : 
     270           0 :         mutex_lock(&adev->grbm_idx_mutex);
     271           0 :         for (idx = 0; idx < 2; ++idx) {
     272           0 :                 if (adev->vce.harvest_config & (1 << idx))
     273           0 :                         continue;
     274             : 
     275           0 :                 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
     276             : 
     277             :                 /* Program instance 0 reg space for two instances or instance 0 case
     278             :                 program instance 1 reg space for only instance 1 available case */
     279           0 :                 if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
     280           0 :                         ring = &adev->vce.ring[0];
     281           0 :                         WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
     282           0 :                         WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
     283           0 :                         WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
     284           0 :                         WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
     285           0 :                         WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
     286             : 
     287           0 :                         ring = &adev->vce.ring[1];
     288           0 :                         WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
     289           0 :                         WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
     290           0 :                         WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
     291           0 :                         WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
     292           0 :                         WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
     293             : 
     294           0 :                         ring = &adev->vce.ring[2];
     295           0 :                         WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
     296           0 :                         WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
     297           0 :                         WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
     298           0 :                         WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
     299           0 :                         WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
     300             :                 }
     301             : 
     302           0 :                 vce_v3_0_mc_resume(adev, idx);
     303           0 :                 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
     304             : 
     305           0 :                 if (adev->asic_type >= CHIP_STONEY)
     306           0 :                         WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
     307             :                 else
     308           0 :                         WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
     309             : 
     310           0 :                 WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
     311           0 :                 mdelay(100);
     312             : 
     313           0 :                 r = vce_v3_0_firmware_loaded(adev);
     314             : 
     315             :                 /* clear BUSY flag */
     316           0 :                 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
     317             : 
     318           0 :                 if (r) {
     319           0 :                         DRM_ERROR("VCE not responding, giving up!!!\n");
     320           0 :                         mutex_unlock(&adev->grbm_idx_mutex);
     321           0 :                         return r;
     322             :                 }
     323             :         }
     324             : 
     325           0 :         WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
     326           0 :         mutex_unlock(&adev->grbm_idx_mutex);
     327             : 
     328           0 :         return 0;
     329             : }
     330             : 
/*
 * vce_v3_0_stop - halt all VCE instances
 *
 * @adev: amdgpu_device pointer
 *
 * For every non-harvested instance: disables the VCPU clock, holds the
 * ECPU in soft reset and clears VCE_STATUS.  Always returns 0.
 */
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		/* Skip instances that are fused off. */
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Disable the VCPU clock; Stoney+ uses a raw mask write. */
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
     359             : 
     360             : #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
     361             : #define VCE_HARVEST_FUSE_MACRO__SHIFT       27
     362             : #define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
     363             : 
     364           0 : static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
     365             : {
     366             :         u32 tmp;
     367             : 
     368           0 :         if ((adev->asic_type == CHIP_FIJI) ||
     369             :             (adev->asic_type == CHIP_STONEY))
     370             :                 return AMDGPU_VCE_HARVEST_VCE1;
     371             : 
     372           0 :         if (adev->flags & AMD_IS_APU)
     373           0 :                 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
     374           0 :                        VCE_HARVEST_FUSE_MACRO__MASK) >>
     375             :                         VCE_HARVEST_FUSE_MACRO__SHIFT;
     376             :         else
     377           0 :                 tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
     378           0 :                        CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
     379             :                         CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
     380             : 
     381           0 :         switch (tmp) {
     382             :         case 1:
     383             :                 return AMDGPU_VCE_HARVEST_VCE0;
     384             :         case 2:
     385           0 :                 return AMDGPU_VCE_HARVEST_VCE1;
     386             :         case 3:
     387           0 :                 return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
     388             :         default:
     389           0 :                 if ((adev->asic_type == CHIP_POLARIS10) ||
     390             :                     (adev->asic_type == CHIP_POLARIS11) ||
     391           0 :                     (adev->asic_type == CHIP_POLARIS12) ||
     392             :                     (adev->asic_type == CHIP_VEGAM))
     393             :                         return AMDGPU_VCE_HARVEST_VCE1;
     394             : 
     395           0 :                 return 0;
     396             :         }
     397             : }
     398             : 
     399           0 : static int vce_v3_0_early_init(void *handle)
     400             : {
     401           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     402             : 
     403           0 :         adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
     404             : 
     405           0 :         if ((adev->vce.harvest_config &
     406             :              (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
     407             :             (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
     408             :                 return -ENOENT;
     409             : 
     410           0 :         adev->vce.num_rings = 3;
     411             : 
     412           0 :         vce_v3_0_set_ring_funcs(adev);
     413           0 :         vce_v3_0_set_irq_funcs(adev);
     414             : 
     415           0 :         return 0;
     416             : }
     417             : 
/*
 * vce_v3_0_sw_init - software-side setup
 *
 * Registers the VCE trap interrupt, allocates the firmware/stack/data
 * BO, resumes the common VCE state and initializes one ring per
 * supported hardware queue.  Returns 0 or a negative error code; each
 * step bails out on failure.
 */
static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE trap interrupt */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;

	/* BO sized for the firmware plus per-instance stack and data. */
	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	/* 52.8.3 required for 3 ring support */
	if (adev->vce.fw_version < FW_52_8_3)
		adev->vce.num_rings = 2;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);

		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}
     457             : 
     458           0 : static int vce_v3_0_sw_fini(void *handle)
     459             : {
     460             :         int r;
     461           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     462             : 
     463           0 :         r = amdgpu_vce_suspend(adev);
     464           0 :         if (r)
     465             :                 return r;
     466             : 
     467           0 :         return amdgpu_vce_sw_fini(adev);
     468             : }
     469             : 
     470           0 : static int vce_v3_0_hw_init(void *handle)
     471             : {
     472             :         int r, i;
     473           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     474             : 
     475           0 :         vce_v3_0_override_vce_clock_gating(adev, true);
     476             : 
     477           0 :         amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
     478             : 
     479           0 :         for (i = 0; i < adev->vce.num_rings; i++) {
     480           0 :                 r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
     481           0 :                 if (r)
     482             :                         return r;
     483             :         }
     484             : 
     485           0 :         DRM_INFO("VCE initialized successfully.\n");
     486             : 
     487           0 :         return 0;
     488             : }
     489             : 
     490           0 : static int vce_v3_0_hw_fini(void *handle)
     491             : {
     492             :         int r;
     493           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     494             : 
     495           0 :         cancel_delayed_work_sync(&adev->vce.idle_work);
     496             : 
     497           0 :         r = vce_v3_0_wait_for_idle(handle);
     498           0 :         if (r)
     499             :                 return r;
     500             : 
     501           0 :         vce_v3_0_stop(adev);
     502           0 :         return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
     503             : }
     504             : 
/*
 * vce_v3_0_suspend - suspend the VCE block
 *
 * Gates power/clocks (via dpm when enabled, otherwise through the IP
 * set_*gating_state helpers), halts the hardware and saves the common
 * VCE state.  Returns 0 or the first failing step's error code.
 */
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, false);
	} else {
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_CG_STATE_GATE);
	}

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
     539             : 
     540           0 : static int vce_v3_0_resume(void *handle)
     541             : {
     542             :         int r;
     543           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     544             : 
     545           0 :         r = amdgpu_vce_resume(adev);
     546           0 :         if (r)
     547             :                 return r;
     548             : 
     549           0 :         return vce_v3_0_hw_init(adev);
     550             : }
     551             : 
/*
 * vce_v3_0_mc_resume - program the VCE memory controller for one instance
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance index (0 or 1); selects the cache offset layout
 *
 * Re-programs clock gating, LMI and VCPU cache registers so the VCE engine
 * can fetch firmware, stack and data from the BO at adev->vce.gpu_addr.
 * NOTE(review): assumes the caller has already selected the target instance
 * via GRBM_GFX_INDEX — confirm against call sites outside this view.
 */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	/* open up clock gating while (re)programming the block */
	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	/* LMI (memory interface) setup: no swapping, VM control cleared */
	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	/* Stoney and newer expose three 40-bit BAR registers; older parts
	 * have a single one. All are written with (gpu_addr >> 8).
	 */
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	/* cache region 0: the shared firmware image */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	/* cache regions 1/2: per-instance stack and data, laid out after the
	 * firmware; instance 1 skips past instance 0's stack+data first.
	 */
	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		/* NOTE(review): mask here is 0xfffffff (28 bits) vs the
		 * 0x7fffffff used for idx == 0 — confirm this is intentional.
		 */
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	/* enable the system trap interrupt */
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
     602             : 
     603           0 : static bool vce_v3_0_is_idle(void *handle)
     604             : {
     605           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     606           0 :         u32 mask = 0;
     607             : 
     608           0 :         mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
     609           0 :         mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
     610             : 
     611           0 :         return !(RREG32(mmSRBM_STATUS2) & mask);
     612             : }
     613             : 
     614           0 : static int vce_v3_0_wait_for_idle(void *handle)
     615             : {
     616             :         unsigned i;
     617           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     618             : 
     619           0 :         for (i = 0; i < adev->usec_timeout; i++)
     620           0 :                 if (vce_v3_0_is_idle(handle))
     621             :                         return 0;
     622             : 
     623             :         return -ETIMEDOUT;
     624             : }
     625             : 
/* VCE_STATUS busy bits used to decide whether a soft reset is needed */
#define  VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK  0x00000008L   /* AUTO_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK   0x00000010L   /* RB0_BUSY */
#define  VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK   0x00000020L   /* RB1_BUSY */
#define  AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
                                      VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

/*
 * vce_v3_0_check_soft_reset - decide whether VCE needs a SRBM soft reset
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Probes VCE_STATUS of both instances (selected via GRBM_GFX_INDEX under
 * grbm_idx_mutex). If either instance reports busy, both VCE soft-reset
 * bits are latched into adev->vce.srbm_soft_reset for the later
 * soft_reset/pre/post hooks. Returns true when a reset is pending.
 */
static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to VCE team , we should use VCE_STATUS instead
	 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * VCE team suggest use bit 3--bit 6 for busy status check
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		/* reset both instances together, even if only one is busy */
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	/* restore instance 0 selection before dropping the mutex */
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}
     672             : 
     673           0 : static int vce_v3_0_soft_reset(void *handle)
     674             : {
     675           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     676             :         u32 srbm_soft_reset;
     677             : 
     678           0 :         if (!adev->vce.srbm_soft_reset)
     679             :                 return 0;
     680           0 :         srbm_soft_reset = adev->vce.srbm_soft_reset;
     681             : 
     682             :         if (srbm_soft_reset) {
     683             :                 u32 tmp;
     684             : 
     685           0 :                 tmp = RREG32(mmSRBM_SOFT_RESET);
     686           0 :                 tmp |= srbm_soft_reset;
     687           0 :                 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
     688           0 :                 WREG32(mmSRBM_SOFT_RESET, tmp);
     689           0 :                 tmp = RREG32(mmSRBM_SOFT_RESET);
     690             : 
     691           0 :                 udelay(50);
     692             : 
     693           0 :                 tmp &= ~srbm_soft_reset;
     694           0 :                 WREG32(mmSRBM_SOFT_RESET, tmp);
     695           0 :                 tmp = RREG32(mmSRBM_SOFT_RESET);
     696             : 
     697             :                 /* Wait a little for things to settle down */
     698             :                 udelay(50);
     699             :         }
     700             : 
     701           0 :         return 0;
     702             : }
     703             : 
     704           0 : static int vce_v3_0_pre_soft_reset(void *handle)
     705             : {
     706           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     707             : 
     708           0 :         if (!adev->vce.srbm_soft_reset)
     709             :                 return 0;
     710             : 
     711           0 :         mdelay(5);
     712             : 
     713           0 :         return vce_v3_0_suspend(adev);
     714             : }
     715             : 
     716             : 
     717           0 : static int vce_v3_0_post_soft_reset(void *handle)
     718             : {
     719           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     720             : 
     721           0 :         if (!adev->vce.srbm_soft_reset)
     722             :                 return 0;
     723             : 
     724           0 :         mdelay(5);
     725             : 
     726           0 :         return vce_v3_0_resume(adev);
     727             : }
     728             : 
     729           0 : static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
     730             :                                         struct amdgpu_irq_src *source,
     731             :                                         unsigned type,
     732             :                                         enum amdgpu_interrupt_state state)
     733             : {
     734           0 :         uint32_t val = 0;
     735             : 
     736           0 :         if (state == AMDGPU_IRQ_STATE_ENABLE)
     737           0 :                 val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
     738             : 
     739           0 :         WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
     740           0 :         return 0;
     741             : }
     742             : 
     743           0 : static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
     744             :                                       struct amdgpu_irq_src *source,
     745             :                                       struct amdgpu_iv_entry *entry)
     746             : {
     747           0 :         DRM_DEBUG("IH: VCE\n");
     748             : 
     749           0 :         WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
     750             : 
     751           0 :         switch (entry->src_data[0]) {
     752             :         case 0:
     753             :         case 1:
     754             :         case 2:
     755           0 :                 amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
     756           0 :                 break;
     757             :         default:
     758           0 :                 DRM_ERROR("Unhandled interrupt: %d %d\n",
     759             :                           entry->src_id, entry->src_data[0]);
     760           0 :                 break;
     761             :         }
     762             : 
     763           0 :         return 0;
     764             : }
     765             : 
/*
 * vce_v3_0_set_clockgating_state - toggle VCE medium-grain clockgating
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 * @state: AMD_CG_STATE_GATE to enable gating, otherwise disable
 *
 * Programs the clock gating registers of each non-harvested VCE instance,
 * selecting the instance via GRBM_GFX_INDEX under grbm_idx_mutex. No-op
 * unless the ASIC advertises AMD_CG_SUPPORT_VCE_MGCG. Returns 0.
 */
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));

		if (!enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	/* restore the default instance selection before releasing the mutex */
	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
     806             : 
     807           0 : static int vce_v3_0_set_powergating_state(void *handle,
     808             :                                           enum amd_powergating_state state)
     809             : {
     810             :         /* This doesn't actually powergate the VCE block.
     811             :          * That's done in the dpm code via the SMC.  This
     812             :          * just re-inits the block as necessary.  The actual
     813             :          * gating still happens in the dpm code.  We should
     814             :          * revisit this when there is a cleaner line between
     815             :          * the smc and the hw blocks
     816             :          */
     817           0 :         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     818           0 :         int ret = 0;
     819             : 
     820           0 :         if (state == AMD_PG_STATE_GATE) {
     821           0 :                 ret = vce_v3_0_stop(adev);
     822             :                 if (ret)
     823             :                         goto out;
     824             :         } else {
     825           0 :                 ret = vce_v3_0_start(adev);
     826             :                 if (ret)
     827             :                         goto out;
     828             :         }
     829             : 
     830             : out:
     831           0 :         return ret;
     832             : }
     833             : 
/*
 * vce_v3_0_get_clockgating_state - report active VCE clockgating features
 *
 * @handle: amdgpu_device pointer (as void *)
 * @flags: output bitmask; AMD_CG_SUPPORT_VCE_MGCG is OR'ed in when active
 *
 * Checks the SMC powergating status first: when VCE is powergated its
 * registers cannot be read, so the query is skipped with a message.
 */
static void vce_v3_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	/* APUs expose the powergating status at a different SMC index */
	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	/* select instance 0 for the register read */
	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
     861             : 
/*
 * vce_v3_0_ring_emit_ib - emit an indirect buffer command (VM mode)
 *
 * @ring: VCE ring to write to
 * @job: job the IB belongs to (supplies the VMID)
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Packet layout (order matters): command, vmid, IB address low/high,
 * IB length in dwords.
 */
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
     875             : 
/*
 * vce_v3_0_emit_vm_flush - emit a VM page-table update and TLB flush
 *
 * @ring: VCE ring to write to
 * @vmid: VM context to update/flush
 * @pd_addr: page directory base address (written shifted down by 12)
 *
 * Emits an UPDATE_PTB packet followed by a FLUSH_TLB packet; packet
 * order is part of the command-stream contract.
 */
static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
     887             : 
/*
 * vce_v3_0_emit_pipeline_sync - emit a wait on the ring's own fence
 *
 * @ring: VCE ring to write to
 *
 * Emits a WAIT_GE packet that blocks the engine until the fence value at
 * the ring's fence GPU address reaches sync_seq, serializing against all
 * previously submitted work on this ring.
 */
static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}
     898             : 
/* amd_ip_funcs hooks for the VCE 3.x IP block; shared by the v3.0, v3.1
 * and v3.4 ip_block versions exported at the bottom of this file.
 */
static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
};
     919             : 
/* Ring functions for physical addressing mode; selected by
 * vce_v3_0_set_ring_funcs() for ASICs older than Stoney.
 */
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size =
		4 + /* vce_v3_0_emit_pipeline_sync */
		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
     943             : 
/* Ring functions for VM addressing mode; selected by
 * vce_v3_0_set_ring_funcs() for Stoney and newer ASICs. Adds the VM flush
 * and pipeline-sync emitters over the physical-mode table.
 */
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
     970             : 
     971           0 : static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
     972             : {
     973             :         int i;
     974             : 
     975           0 :         if (adev->asic_type >= CHIP_STONEY) {
     976           0 :                 for (i = 0; i < adev->vce.num_rings; i++) {
     977           0 :                         adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
     978           0 :                         adev->vce.ring[i].me = i;
     979             :                 }
     980           0 :                 DRM_INFO("VCE enabled in VM mode\n");
     981             :         } else {
     982           0 :                 for (i = 0; i < adev->vce.num_rings; i++) {
     983           0 :                         adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
     984           0 :                         adev->vce.ring[i].me = i;
     985             :                 }
     986           0 :                 DRM_INFO("VCE enabled in physical mode\n");
     987             :         }
     988           0 : }
     989             : 
/* Interrupt source callbacks for the VCE trap interrupt. */
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};
     994             : 
     995             : static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
     996             : {
     997           0 :         adev->vce.irq.num_types = 1;
     998           0 :         adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
     999             : };
    1000             : 
/* IP block descriptor for VCE 3.0. */
const struct amdgpu_ip_block_version vce_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};
    1009             : 
/* IP block descriptor for VCE 3.1 (reuses the v3.0 hooks). */
const struct amdgpu_ip_block_version vce_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};
    1018             : 
/* IP block descriptor for VCE 3.4 (reuses the v3.0 hooks). */
const struct amdgpu_ip_block_version vce_v3_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 4,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

Generated by: LCOV version 1.14