LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/amdgpu - amdgpu_virt.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
              Hit  Total  Coverage
Lines:          0    464     0.0 %
Functions:      0     34     0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2016 Advanced Micro Devices, Inc.
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice shall be included in
      12             :  * all copies or substantial portions of the Software.
      13             :  *
      14             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      15             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      16             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      17             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      18             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      19             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      20             :  * OTHER DEALINGS IN THE SOFTWARE.
      21             :  *
      22             :  */
      23             : 
      24             : #include <linux/module.h>
      25             : 
      26             : #ifdef CONFIG_X86
      27             : #include <asm/hypervisor.h>
      28             : #endif
      29             : 
      30             : #include <drm/drm_drv.h>
      31             : #include <xen/xen.h>
      32             : 
      33             : #include "amdgpu.h"
      34             : #include "amdgpu_ras.h"
      35             : #include "vi.h"
      36             : #include "soc15.h"
      37             : #include "nv.h"
      38             : 
      39             : #define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
      40             :         do { \
      41             :                 vf2pf_info->ucode_info[ucode].id = ucode; \
      42             :                 vf2pf_info->ucode_info[ucode].version = ver; \
      43             :         } while (0)
      44             : 
      45           0 : bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
      46             : {
       47             :         /* By now all MMIO pages except the mailbox are blocked
       48             :          * if blocking is enabled in the hypervisor. Choose
       49             :          * SCRATCH_REG0 to test. */
      50           0 :         return RREG32_NO_KIQ(0xc040) == 0xffffffff;
      51             : }
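
[Editorial sketch — not part of the original file; the early-init placement and
error handling are assumptions:] a VF driver can probe for hypervisor MMIO
blocking before touching other registers:

        if (amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev)) {
                dev_err(adev->dev, "VF MMIO is blocked by the hypervisor\n");
                return -EIO;
        }
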
      52             : 
      53           0 : void amdgpu_virt_init_setting(struct amdgpu_device *adev)
      54             : {
      55           0 :         struct drm_device *ddev = adev_to_drm(adev);
      56             : 
      57             :         /* enable virtual display */
      58           0 :         if (adev->asic_type != CHIP_ALDEBARAN &&
      59             :             adev->asic_type != CHIP_ARCTURUS) {
      60           0 :                 if (adev->mode_info.num_crtc == 0)
      61           0 :                         adev->mode_info.num_crtc = 1;
      62           0 :                 adev->enable_virtual_display = true;
      63             :         }
      64           0 :         ddev->driver_features &= ~DRIVER_ATOMIC;
      65           0 :         adev->cg_flags = 0;
      66           0 :         adev->pg_flags = 0;
      67           0 : }
      68             : 
      69           0 : void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
      70             :                                         uint32_t reg0, uint32_t reg1,
      71             :                                         uint32_t ref, uint32_t mask)
      72             : {
      73           0 :         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
      74           0 :         struct amdgpu_ring *ring = &kiq->ring;
      75           0 :         signed long r, cnt = 0;
      76             :         unsigned long flags;
      77             :         uint32_t seq;
      78             : 
      79           0 :         if (adev->mes.ring.sched.ready) {
      80           0 :                 amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
      81             :                                               ref, mask);
      82           0 :                 return;
      83             :         }
      84             : 
      85           0 :         spin_lock_irqsave(&kiq->ring_lock, flags);
      86           0 :         amdgpu_ring_alloc(ring, 32);
      87           0 :         amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
      88             :                                             ref, mask);
      89           0 :         r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
      90           0 :         if (r)
      91             :                 goto failed_undo;
      92             : 
      93           0 :         amdgpu_ring_commit(ring);
      94           0 :         spin_unlock_irqrestore(&kiq->ring_lock, flags);
      95             : 
      96           0 :         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
      97             : 
       98             :         /* can't retry with sleeping waits in IRQ context */
      99           0 :         if (r < 1 && in_interrupt())
     100             :                 goto failed_kiq;
     101             : 
     102             :         might_sleep();
     103           0 :         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
     104             : 
     105           0 :                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
     106           0 :                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
     107             :         }
     108             : 
     109           0 :         if (cnt > MAX_KIQ_REG_TRY)
     110             :                 goto failed_kiq;
     111             : 
     112             :         return;
     113             : 
     114             : failed_undo:
     115           0 :         amdgpu_ring_undo(ring);
     116           0 :         spin_unlock_irqrestore(&kiq->ring_lock, flags);
     117             : failed_kiq:
     118           0 :         dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
     119             : }
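
[Editorial sketch — reg_a/reg_b are placeholder dword offsets, not real
registers:] the helper writes reg_a and then waits until (reg_b & mask) == ref,
going through MES when its ring is ready and the KIQ ring otherwise:

        /* e.g. a request/ack register pair */
        amdgpu_virt_kiq_reg_write_reg_wait(adev, reg_a, reg_b, 0x1, 0x1);
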
     120             : 
     121             : /**
     122             :  * amdgpu_virt_request_full_gpu() - request full gpu access
     123             :  * @adev:       amdgpu device.
      124             :  * @init:       whether this is driver init time.
      125             :  * When starting to init/fini the driver, we first need to request full GPU access.
      126             :  * Return: Zero on success, otherwise an error code.
     127             :  */
     128           0 : int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
     129             : {
     130           0 :         struct amdgpu_virt *virt = &adev->virt;
     131             :         int r;
     132             : 
     133           0 :         if (virt->ops && virt->ops->req_full_gpu) {
     134           0 :                 r = virt->ops->req_full_gpu(adev, init);
     135           0 :                 if (r)
     136             :                         return r;
     137             : 
     138           0 :                 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
     139             :         }
     140             : 
     141             :         return 0;
     142             : }
     143             : 
     144             : /**
     145             :  * amdgpu_virt_release_full_gpu() - release full gpu access
     146             :  * @adev:       amdgpu device.
      147             :  * @init:       whether this is driver init time.
      148             :  * When finishing driver init/fini, we need to release full GPU access.
      149             :  * Return: Zero on success, otherwise an error code.
     150             :  */
     151           0 : int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
     152             : {
     153           0 :         struct amdgpu_virt *virt = &adev->virt;
     154             :         int r;
     155             : 
     156           0 :         if (virt->ops && virt->ops->rel_full_gpu) {
     157           0 :                 r = virt->ops->rel_full_gpu(adev, init);
     158           0 :                 if (r)
     159             :                         return r;
     160             : 
     161           0 :                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
     162             :         }
     163             :         return 0;
     164             : }
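
[Editorial sketch — loosely modeled on the driver init path, with error
handling abbreviated:] request and release bracket the init/fini work:

        r = amdgpu_virt_request_full_gpu(adev, true);
        if (r)
                return r;
        /* ... hardware init runs while holding full GPU access ... */
        r = amdgpu_virt_release_full_gpu(adev, true);
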
     165             : 
     166             : /**
     167             :  * amdgpu_virt_reset_gpu() - reset gpu
     168             :  * @adev:       amdgpu device.
      169             :  * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
      170             :  * Return: Zero on reset success, otherwise an error code.
     171             :  */
     172           0 : int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
     173             : {
     174           0 :         struct amdgpu_virt *virt = &adev->virt;
     175             :         int r;
     176             : 
     177           0 :         if (virt->ops && virt->ops->reset_gpu) {
     178           0 :                 r = virt->ops->reset_gpu(adev);
     179           0 :                 if (r)
     180             :                         return r;
     181             : 
     182           0 :                 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
     183             :         }
     184             : 
     185             :         return 0;
     186             : }
     187             : 
     188           0 : void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
     189             : {
     190           0 :         struct amdgpu_virt *virt = &adev->virt;
     191             : 
     192           0 :         if (virt->ops && virt->ops->req_init_data)
     193           0 :                 virt->ops->req_init_data(adev);
     194             : 
     195           0 :         if (adev->virt.req_init_data_ver > 0)
     196           0 :                 DRM_INFO("host supports REQ_INIT_DATA handshake\n");
     197             :         else
     198           0 :                 DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
     199           0 : }
     200             : 
     201             : /**
      202             :  * amdgpu_virt_wait_reset() - wait for the GPU reset to complete
      203             :  * @adev:       amdgpu device.
      204             :  * Wait for the GPU reset to complete.
      205             :  * Return: Zero on reset success, otherwise an error code.
     206             :  */
     207           0 : int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
     208             : {
     209           0 :         struct amdgpu_virt *virt = &adev->virt;
     210             : 
     211           0 :         if (!virt->ops || !virt->ops->wait_reset)
     212             :                 return -EINVAL;
     213             : 
     214           0 :         return virt->ops->wait_reset(adev);
     215             : }
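
[Editorial sketch — the real recovery path lives in the reset code elsewhere in
the driver:] an illustrative VF reset handshake combines the two calls above:

        r = amdgpu_virt_reset_gpu(adev);                /* ask the host to reset */
        if (!r)
                r = amdgpu_virt_wait_reset(adev);       /* block until it completes */
        if (r)
                dev_err(adev->dev, "VF reset handshake failed (%d)\n", r);
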
     216             : 
     217             : /**
     218             :  * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
     219             :  * @adev:       amdgpu device.
      220             :  * The MM table is used by UVD and VCE for their initialization.
      221             :  * Return: Zero on successful allocation, otherwise an error code.
     222             :  */
     223           0 : int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
     224             : {
     225             :         int r;
     226             : 
     227           0 :         if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
     228             :                 return 0;
     229             : 
     230           0 :         r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
     231             :                                     AMDGPU_GEM_DOMAIN_VRAM,
     232             :                                     &adev->virt.mm_table.bo,
     233           0 :                                     &adev->virt.mm_table.gpu_addr,
     234           0 :                                     (void *)&adev->virt.mm_table.cpu_addr);
     235           0 :         if (r) {
     236           0 :                 DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
     237           0 :                 return r;
     238             :         }
     239             : 
     240           0 :         memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
     241           0 :         DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
     242             :                  adev->virt.mm_table.gpu_addr,
     243             :                  adev->virt.mm_table.cpu_addr);
     244           0 :         return 0;
     245             : }
     246             : 
     247             : /**
     248             :  * amdgpu_virt_free_mm_table() - free mm table memory
     249             :  * @adev:       amdgpu device.
     250             :  * Free MM table memory
     251             :  */
     252           0 : void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
     253             : {
     254           0 :         if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
     255             :                 return;
     256             : 
     257           0 :         amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
     258           0 :                               &adev->virt.mm_table.gpu_addr,
     259           0 :                               (void *)&adev->virt.mm_table.cpu_addr);
     260           0 :         adev->virt.mm_table.gpu_addr = 0;
     261             : }
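
[Editorial note:] the alloc/free pair is safe to call unconditionally: alloc
returns 0 early on bare metal or when the table already exists, and free
returns early when there is nothing to release. A minimal sketch:

        r = amdgpu_virt_alloc_mm_table(adev);   /* no-op on bare metal */
        if (r)
                return r;
        /* ... UVD/VCE consume their init tables via adev->virt.mm_table ... */
        amdgpu_virt_free_mm_table(adev);
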
     262             : 
     263             : 
     264           0 : unsigned int amd_sriov_msg_checksum(void *obj,
     265             :                                 unsigned long obj_size,
     266             :                                 unsigned int key,
     267             :                                 unsigned int checksum)
     268             : {
     269           0 :         unsigned int ret = key;
     270           0 :         unsigned long i = 0;
     271             :         unsigned char *pos;
     272             : 
     273           0 :         pos = (char *)obj;
     274             :         /* calculate checksum */
     275           0 :         for (i = 0; i < obj_size; ++i)
     276           0 :                 ret += *(pos + i);
     277             :         /* minus the checksum itself */
     278             :         pos = (char *)&checksum;
     279           0 :         for (i = 0; i < sizeof(checksum); ++i)
     280           0 :                 ret -= *(pos + i);
     281           0 :         return ret;
     282             : }
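
[Editorial worked example — mirrors the version-1 path in
amdgpu_virt_read_pf2vf_data() below:] the helper sums every byte of the object
seeded with a key, then subtracts the bytes of the passed-in checksum, so a
message can be verified in place without zeroing the checksum field first:

        uint32_t stored = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
        uint32_t calc = amd_sriov_msg_checksum(pf2vf_info, pf2vf_info->size,
                                               adev->virt.fw_reserve.checksum_key,
                                               stored);
        bool valid = (stored == calc);  /* true for an intact message */
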
     283             : 
     284           0 : static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
     285             : {
     286           0 :         struct amdgpu_virt *virt = &adev->virt;
     287           0 :         struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
      288             :         /* GPU will be marked bad on the host if the bp count is more than 10,
      289             :          * so allocating 512 is enough.
     290             :          */
     291           0 :         unsigned int align_space = 512;
     292           0 :         void *bps = NULL;
     293           0 :         struct amdgpu_bo **bps_bo = NULL;
     294             : 
     295           0 :         *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
     296           0 :         if (!*data)
     297             :                 goto data_failure;
     298             : 
     299           0 :         bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
     300           0 :         if (!bps)
     301             :                 goto bps_failure;
     302             : 
     303           0 :         bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
     304           0 :         if (!bps_bo)
     305             :                 goto bps_bo_failure;
     306             : 
     307           0 :         (*data)->bps = bps;
     308           0 :         (*data)->bps_bo = bps_bo;
     309           0 :         (*data)->count = 0;
     310           0 :         (*data)->last_reserved = 0;
     311             : 
     312           0 :         virt->ras_init_done = true;
     313             : 
     314           0 :         return 0;
     315             : 
     316             : bps_bo_failure:
     317           0 :         kfree(bps);
     318             : bps_failure:
     319           0 :         kfree(*data);
     320             : data_failure:
     321             :         return -ENOMEM;
     322             : }
     323             : 
     324           0 : static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
     325             : {
     326           0 :         struct amdgpu_virt *virt = &adev->virt;
     327           0 :         struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
     328             :         struct amdgpu_bo *bo;
     329             :         int i;
     330             : 
     331           0 :         if (!data)
     332           0 :                 return;
     333             : 
     334           0 :         for (i = data->last_reserved - 1; i >= 0; i--) {
     335           0 :                 bo = data->bps_bo[i];
     336           0 :                 amdgpu_bo_free_kernel(&bo, NULL, NULL);
     337           0 :                 data->bps_bo[i] = bo;
     338           0 :                 data->last_reserved = i;
     339             :         }
     340             : }
     341             : 
     342           0 : void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
     343             : {
     344           0 :         struct amdgpu_virt *virt = &adev->virt;
     345           0 :         struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
     346             : 
     347           0 :         virt->ras_init_done = false;
     348             : 
     349           0 :         if (!data)
     350             :                 return;
     351             : 
     352           0 :         amdgpu_virt_ras_release_bp(adev);
     353             : 
     354           0 :         kfree(data->bps);
     355           0 :         kfree(data->bps_bo);
     356           0 :         kfree(data);
     357           0 :         virt->virt_eh_data = NULL;
     358             : }
     359             : 
     360           0 : static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
     361             :                 struct eeprom_table_record *bps, int pages)
     362             : {
     363           0 :         struct amdgpu_virt *virt = &adev->virt;
     364           0 :         struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
     365             : 
     366           0 :         if (!data)
     367             :                 return;
     368             : 
     369           0 :         memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
     370           0 :         data->count += pages;
     371             : }
     372             : 
     373           0 : static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
     374             : {
     375           0 :         struct amdgpu_virt *virt = &adev->virt;
     376           0 :         struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
     377           0 :         struct amdgpu_bo *bo = NULL;
     378             :         uint64_t bp;
     379             :         int i;
     380             : 
     381           0 :         if (!data)
     382           0 :                 return;
     383             : 
     384           0 :         for (i = data->last_reserved; i < data->count; i++) {
     385           0 :                 bp = data->bps[i].retired_page;
     386             : 
      387             :                 /* There are two cases of reserve error that should be ignored:
     388             :                  * 1) a ras bad page has been allocated (used by someone);
     389             :                  * 2) a ras bad page has been reserved (duplicate error injection
     390             :                  *    for one page);
     391             :                  */
     392           0 :                 if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
     393             :                                                AMDGPU_GPU_PAGE_SIZE,
     394             :                                                AMDGPU_GEM_DOMAIN_VRAM,
     395             :                                                &bo, NULL))
     396           0 :                         DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
     397             : 
     398           0 :                 data->bps_bo[i] = bo;
     399           0 :                 data->last_reserved = i + 1;
     400           0 :                 bo = NULL;
     401             :         }
     402             : }
     403             : 
     404             : static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
     405             :                 uint64_t retired_page)
     406             : {
     407           0 :         struct amdgpu_virt *virt = &adev->virt;
     408           0 :         struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
     409             :         int i;
     410             : 
     411           0 :         if (!data)
     412             :                 return true;
     413             : 
     414           0 :         for (i = 0; i < data->count; i++)
     415           0 :                 if (retired_page == data->bps[i].retired_page)
     416             :                         return true;
     417             : 
     418             :         return false;
     419             : }
     420             : 
     421           0 : static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
     422             :                 uint64_t bp_block_offset, uint32_t bp_block_size)
     423             : {
     424             :         struct eeprom_table_record bp;
     425             :         uint64_t retired_page;
     426             :         uint32_t bp_idx, bp_cnt;
     427             : 
     428           0 :         if (bp_block_size) {
     429           0 :                 bp_cnt = bp_block_size / sizeof(uint64_t);
     430           0 :                 for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
     431           0 :                         retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
     432           0 :                                         bp_block_offset + bp_idx * sizeof(uint64_t));
     433           0 :                         bp.retired_page = retired_page;
     434             : 
     435           0 :                         if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
     436           0 :                                 continue;
     437             : 
     438           0 :                         amdgpu_virt_ras_add_bps(adev, &bp, 1);
     439             : 
     440           0 :                         amdgpu_virt_ras_reserve_bps(adev);
     441             :                 }
     442             :         }
     443           0 : }
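
[Editorial diagram of the layout the loop above assumes:] the bad-page block is
a packed array of 64-bit retired-page numbers inside the reserved FW VRAM
region:

        fw_vram_usage_va + bp_block_offset:
                [0] retired_page of bad page 0  (uint64_t)
                [1] retired_page of bad page 1
                ...
        with bp_cnt = bp_block_size / sizeof(uint64_t) entries in total.
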
     444             : 
     445           0 : static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
     446             : {
     447           0 :         struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
     448             :         uint32_t checksum;
     449             :         uint32_t checkval;
     450             : 
     451             :         uint32_t i;
     452             :         uint32_t tmp;
     453             : 
     454           0 :         if (adev->virt.fw_reserve.p_pf2vf == NULL)
     455             :                 return -EINVAL;
     456             : 
     457           0 :         if (pf2vf_info->size > 1024) {
     458           0 :                 DRM_ERROR("invalid pf2vf message size\n");
     459           0 :                 return -EINVAL;
     460             :         }
     461             : 
     462           0 :         switch (pf2vf_info->version) {
     463             :         case 1:
     464           0 :                 checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
     465           0 :                 checkval = amd_sriov_msg_checksum(
     466           0 :                         adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
     467             :                         adev->virt.fw_reserve.checksum_key, checksum);
     468           0 :                 if (checksum != checkval) {
     469           0 :                         DRM_ERROR("invalid pf2vf message\n");
     470           0 :                         return -EINVAL;
     471             :                 }
     472             : 
     473           0 :                 adev->virt.gim_feature =
     474           0 :                         ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
     475           0 :                 break;
     476             :         case 2:
     477             :                 /* TODO: missing key, need to add it later */
     478           0 :                 checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
     479           0 :                 checkval = amd_sriov_msg_checksum(
     480           0 :                         adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
     481             :                         0, checksum);
     482           0 :                 if (checksum != checkval) {
     483           0 :                         DRM_ERROR("invalid pf2vf message\n");
     484           0 :                         return -EINVAL;
     485             :                 }
     486             : 
     487           0 :                 adev->virt.vf2pf_update_interval_ms =
     488           0 :                         ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
     489           0 :                 adev->virt.gim_feature =
     490           0 :                         ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
     491           0 :                 adev->virt.reg_access =
     492           0 :                         ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
     493             : 
     494           0 :                 adev->virt.decode_max_dimension_pixels = 0;
     495           0 :                 adev->virt.decode_max_frame_pixels = 0;
     496           0 :                 adev->virt.encode_max_dimension_pixels = 0;
     497           0 :                 adev->virt.encode_max_frame_pixels = 0;
     498           0 :                 adev->virt.is_mm_bw_enabled = false;
     499           0 :                 for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
     500           0 :                         tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
     501           0 :                         adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
     502             : 
     503           0 :                         tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
     504           0 :                         adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
     505             : 
     506           0 :                         tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
     507           0 :                         adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
     508             : 
     509           0 :                         tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
     510           0 :                         adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
     511             :                 }
      512           0 :                 if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
     513           0 :                         adev->virt.is_mm_bw_enabled = true;
     514             : 
     515           0 :                 adev->unique_id =
     516           0 :                         ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
     517           0 :                 break;
     518             :         default:
     519           0 :                 DRM_ERROR("invalid pf2vf version\n");
     520           0 :                 return -EINVAL;
     521             :         }
     522             : 
      523             :         /* correct interval values that are too large or too small */
     524           0 :         if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
     525           0 :                 adev->virt.vf2pf_update_interval_ms = 2000;
     526             : 
     527             :         return 0;
     528             : }
     529             : 
     530           0 : static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
     531             : {
     532             :         struct amd_sriov_msg_vf2pf_info *vf2pf_info;
     533           0 :         vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
     534             : 
     535           0 :         if (adev->virt.fw_reserve.p_vf2pf == NULL)
     536             :                 return;
     537             : 
     538           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
     539           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
     540           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
     541           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
     542           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
     543           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
     544           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
     545           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
     546           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
     547           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
     548           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
     549           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
     550           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
     551           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
     552             :                             adev->psp.asd_context.bin_desc.fw_version);
     553           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
     554             :                             adev->psp.ras_context.context.bin_desc.fw_version);
     555           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
     556             :                             adev->psp.xgmi_context.context.bin_desc.fw_version);
     557           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
     558           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
     559           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
     560           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
     561           0 :         POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
     562             : }
     563             : 
     564           0 : static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
     565             : {
     566             :         struct amd_sriov_msg_vf2pf_info *vf2pf_info;
     567             : 
     568           0 :         vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
     569             : 
     570           0 :         if (adev->virt.fw_reserve.p_vf2pf == NULL)
     571             :                 return -EINVAL;
     572             : 
     573           0 :         memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
     574             : 
     575           0 :         vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
     576           0 :         vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
     577             : 
     578             : #ifdef MODULE
     579             :         if (THIS_MODULE->version != NULL)
     580             :                 strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
     581             :         else
     582             : #endif
     583           0 :                 strcpy(vf2pf_info->driver_version, "N/A");
     584             : 
     585           0 :         vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
     586           0 :         vf2pf_info->driver_cert = 0;
     587           0 :         vf2pf_info->os_info.all = 0;
     588             : 
     589           0 :         vf2pf_info->fb_usage =
     590           0 :                 ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
     591           0 :         vf2pf_info->fb_vis_usage =
     592           0 :                 amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
     593           0 :         vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
     594           0 :         vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
     595             : 
     596           0 :         amdgpu_virt_populate_vf2pf_ucode_info(adev);
     597             : 
     598             :         /* TODO: read dynamic info */
     599           0 :         vf2pf_info->gfx_usage = 0;
     600           0 :         vf2pf_info->compute_usage = 0;
     601           0 :         vf2pf_info->encode_usage = 0;
     602           0 :         vf2pf_info->decode_usage = 0;
     603             : 
     604           0 :         vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
     605           0 :         vf2pf_info->checksum =
     606           0 :                 amd_sriov_msg_checksum(
     607           0 :                 vf2pf_info, vf2pf_info->header.size, 0, 0);
     608             : 
     609           0 :         return 0;
     610             : }
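
[Editorial note:] because the struct is zeroed by memset() before being filled,
the checksum slot still holds 0 when amd_sriov_msg_checksum() runs, so the
stored value is a byte sum that excludes itself. A host-side verification
sketch (symmetric to the guest-side computation above):

        bool ok = (vf2pf_info->checksum ==
                   amd_sriov_msg_checksum(vf2pf_info, vf2pf_info->header.size,
                                          0, vf2pf_info->checksum));
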
     611             : 
     612           0 : static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
     613             : {
     614           0 :         struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
     615             :         int ret;
     616             : 
     617           0 :         ret = amdgpu_virt_read_pf2vf_data(adev);
     618           0 :         if (ret)
     619             :                 goto out;
     620           0 :         amdgpu_virt_write_vf2pf_data(adev);
     621             : 
     622             : out:
      623           0 :         schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
     624           0 : }
     625             : 
     626           0 : void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
     627             : {
     628           0 :         if (adev->virt.vf2pf_update_interval_ms != 0) {
     629           0 :                 DRM_INFO("clean up the vf2pf work item\n");
     630           0 :                 cancel_delayed_work_sync(&adev->virt.vf2pf_work);
     631           0 :                 adev->virt.vf2pf_update_interval_ms = 0;
     632             :         }
     633           0 : }
     634             : 
     635           0 : void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
     636             : {
     637           0 :         adev->virt.fw_reserve.p_pf2vf = NULL;
     638           0 :         adev->virt.fw_reserve.p_vf2pf = NULL;
     639           0 :         adev->virt.vf2pf_update_interval_ms = 0;
     640             : 
     641           0 :         if (adev->mman.fw_vram_usage_va != NULL) {
      642             :                 /* go through this logic in ip_init and reset to init the workqueue */
     643           0 :                 amdgpu_virt_exchange_data(adev);
     644             : 
     645           0 :                 INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
     646           0 :                 schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
     647           0 :         } else if (adev->bios != NULL) {
      648             :                 /* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
     649           0 :                 adev->virt.fw_reserve.p_pf2vf =
     650             :                         (struct amd_sriov_msg_pf2vf_info_header *)
     651           0 :                         (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
     652             : 
     653           0 :                 amdgpu_virt_read_pf2vf_data(adev);
     654             :         }
     655           0 : }
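
[Editorial lifecycle sketch:] init schedules the self-rearming work item and
fini cancels it, so the two calls bracket the driver's lifetime:

        amdgpu_virt_init_data_exchange(adev);   /* starts periodic vf2pf updates */
        /* ... the work item re-arms itself every vf2pf_update_interval_ms ... */
        amdgpu_virt_fini_data_exchange(adev);   /* cancels the work, zeroes the interval */
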
     656             : 
     657             : 
     658           0 : void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
     659             : {
     660           0 :         uint64_t bp_block_offset = 0;
     661           0 :         uint32_t bp_block_size = 0;
     662           0 :         struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
     663             : 
     664           0 :         if (adev->mman.fw_vram_usage_va != NULL) {
     665             : 
     666           0 :                 adev->virt.fw_reserve.p_pf2vf =
     667             :                         (struct amd_sriov_msg_pf2vf_info_header *)
     668           0 :                         (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
     669           0 :                 adev->virt.fw_reserve.p_vf2pf =
     670             :                         (struct amd_sriov_msg_vf2pf_info_header *)
     671           0 :                         (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
     672             : 
     673           0 :                 amdgpu_virt_read_pf2vf_data(adev);
     674           0 :                 amdgpu_virt_write_vf2pf_data(adev);
     675             : 
     676             :                 /* bad page handling for version 2 */
     677           0 :                 if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
     678           0 :                                 pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
     679             : 
     680           0 :                                 bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
     681           0 :                                                 ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
     682           0 :                                 bp_block_size = pf2vf_v2->bp_block_size;
     683             : 
     684           0 :                                 if (bp_block_size && !adev->virt.ras_init_done)
     685           0 :                                         amdgpu_virt_init_ras_err_handler_data(adev);
     686             : 
     687           0 :                                 if (adev->virt.ras_init_done)
     688           0 :                                         amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
     689             :                         }
     690             :         }
     691           0 : }
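
[Editorial worked example of the offset assembly above:] with
bp_block_offset_low = 0x0000F000 and bp_block_offset_high = 0x00000001, the
combined 64-bit offset is (0x1ULL << 32) | 0xF000 = 0x10000F000.
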
     692             : 
     693           0 : void amdgpu_detect_virtualization(struct amdgpu_device *adev)
     694             : {
     695             :         uint32_t reg;
     696             : 
     697           0 :         switch (adev->asic_type) {
     698             :         case CHIP_TONGA:
     699             :         case CHIP_FIJI:
     700           0 :                 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
     701           0 :                 break;
     702             :         case CHIP_VEGA10:
     703             :         case CHIP_VEGA20:
     704             :         case CHIP_NAVI10:
     705             :         case CHIP_NAVI12:
     706             :         case CHIP_SIENNA_CICHLID:
     707             :         case CHIP_ARCTURUS:
     708             :         case CHIP_ALDEBARAN:
     709             :         case CHIP_IP_DISCOVERY:
     710           0 :                 reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
     711           0 :                 break;
     712             :         default: /* other chip doesn't support SRIOV */
     713             :                 reg = 0;
     714             :                 break;
     715             :         }
     716             : 
     717           0 :         if (reg & 1)
     718           0 :                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
     719             : 
     720           0 :         if (reg & 0x80000000)
     721           0 :                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
     722             : 
     723             :         if (!reg) {
      724             :                 /* passthrough mode excludes sriov mode */
     725             :                 if (is_virtual_machine() && !xen_initial_domain())
     726             :                         adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
     727             :         }
     728             : 
     729             :         /* we have the ability to check now */
     730           0 :         if (amdgpu_sriov_vf(adev)) {
     731           0 :                 switch (adev->asic_type) {
     732             :                 case CHIP_TONGA:
     733             :                 case CHIP_FIJI:
     734           0 :                         vi_set_virt_ops(adev);
     735           0 :                         break;
     736             :                 case CHIP_VEGA10:
     737           0 :                         soc15_set_virt_ops(adev);
     738             : #ifdef CONFIG_X86
      739             :                         /* do not send GPU_INIT_DATA with MS_HYPERV */
     740             :                         if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
     741             : #endif
     742             :                                 /* send a dummy GPU_INIT_DATA request to host on vega10 */
     743           0 :                                 amdgpu_virt_request_init_data(adev);
     744           0 :                         break;
     745             :                 case CHIP_VEGA20:
     746             :                 case CHIP_ARCTURUS:
     747             :                 case CHIP_ALDEBARAN:
     748           0 :                         soc15_set_virt_ops(adev);
     749           0 :                         break;
     750             :                 case CHIP_NAVI10:
     751             :                 case CHIP_NAVI12:
     752             :                 case CHIP_SIENNA_CICHLID:
     753             :                 case CHIP_IP_DISCOVERY:
     754           0 :                         nv_set_virt_ops(adev);
     755             :                         /* try send GPU_INIT_DATA request to host */
     756           0 :                         amdgpu_virt_request_init_data(adev);
     757           0 :                         break;
     758             :                 default: /* other chip doesn't support SRIOV */
     759           0 :                         DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
     760           0 :                         break;
     761             :                 }
     762             :         }
     763           0 : }
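
[Editorial summary of the identifier bits checked above:]

        IOV_FUNC_IDENTIFIER:
                bit 0 set      -> this function is a VF  (AMDGPU_SRIOV_CAPS_IS_VF)
                bit 31 set     -> IOV is enabled         (AMDGPU_SRIOV_CAPS_ENABLE_IOV)
                reg == 0       -> no SRIOV; a VM that is not a Xen initial domain
                                  is treated as passthrough (AMDGPU_PASSTHROUGH_MODE)
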
     764             : 
     765             : static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
     766             : {
     767           0 :         return amdgpu_sriov_is_debug(adev) ? true : false;
     768             : }
     769             : 
     770             : static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
     771             : {
     772           0 :         return amdgpu_sriov_is_normal(adev) ? true : false;
     773             : }
     774             : 
     775           0 : int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
     776             : {
     777           0 :         if (!amdgpu_sriov_vf(adev) ||
     778           0 :             amdgpu_virt_access_debugfs_is_kiq(adev))
     779             :                 return 0;
     780             : 
     781           0 :         if (amdgpu_virt_access_debugfs_is_mmio(adev))
     782           0 :                 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
     783             :         else
     784             :                 return -EPERM;
     785             : 
     786           0 :         return 0;
     787             : }
     788             : 
     789           0 : void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
     790             : {
     791           0 :         if (amdgpu_sriov_vf(adev))
     792           0 :                 adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
     793           0 : }
     794             : 
     795           0 : enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
     796             : {
     797             :         enum amdgpu_sriov_vf_mode mode;
     798             : 
     799           0 :         if (amdgpu_sriov_vf(adev)) {
     800           0 :                 if (amdgpu_sriov_is_pp_one_vf(adev))
     801             :                         mode = SRIOV_VF_MODE_ONE_VF;
     802             :                 else
     803           0 :                         mode = SRIOV_VF_MODE_MULTI_VF;
     804             :         } else {
     805             :                 mode = SRIOV_VF_MODE_BARE_METAL;
     806             :         }
     807             : 
     808           0 :         return mode;
     809             : }
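
[Editorial consumer sketch of the three modes:]

        switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
        case SRIOV_VF_MODE_BARE_METAL:  /* not a VF at all */
                break;
        case SRIOV_VF_MODE_ONE_VF:      /* a single VF owns the whole GPU */
                break;
        case SRIOV_VF_MODE_MULTI_VF:    /* the GPU is shared across VFs */
                break;
        }
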
     810             : 
     811           0 : bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
     812             : {
     813           0 :         switch (adev->ip_versions[MP0_HWIP][0]) {
     814             :         case IP_VERSION(13, 0, 0):
     815             :                 /* no vf autoload, white list */
     816           0 :                 if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
     817             :                     ucode_id == AMDGPU_UCODE_ID_VCN)
     818             :                         return false;
     819             :                 else
     820           0 :                         return true;
     821             :         case IP_VERSION(13, 0, 10):
     822             :                 /* white list */
     823           0 :                 if (ucode_id == AMDGPU_UCODE_ID_CAP
     824           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
     825             :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
     826           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
     827             :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
     828           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
     829             :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
     830           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
     831             :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
     832           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
     833             :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
     834           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
     835             :                 || ucode_id == AMDGPU_UCODE_ID_CP_MES
     836           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
     837             :                 || ucode_id == AMDGPU_UCODE_ID_CP_MES1
     838           0 :                 || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
     839             :                 || ucode_id == AMDGPU_UCODE_ID_VCN1
     840           0 :                 || ucode_id == AMDGPU_UCODE_ID_VCN)
     841             :                         return false;
     842             :                 else
     843           0 :                         return true;
     844             :         default:
      845             :                 /* legacy black list */
     846           0 :                 if (ucode_id == AMDGPU_UCODE_ID_SDMA0
     847             :                     || ucode_id == AMDGPU_UCODE_ID_SDMA1
     848             :                     || ucode_id == AMDGPU_UCODE_ID_SDMA2
     849             :                     || ucode_id == AMDGPU_UCODE_ID_SDMA3
     850             :                     || ucode_id == AMDGPU_UCODE_ID_SDMA4
     851             :                     || ucode_id == AMDGPU_UCODE_ID_SDMA5
     852             :                     || ucode_id == AMDGPU_UCODE_ID_SDMA6
     853           0 :                     || ucode_id == AMDGPU_UCODE_ID_SDMA7
     854           0 :                     || ucode_id == AMDGPU_UCODE_ID_RLC_G
     855             :                     || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
     856           0 :                     || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
     857           0 :                     || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
     858           0 :                     || ucode_id == AMDGPU_UCODE_ID_SMC)
     859             :                         return true;
     860             :                 else
     861           0 :                         return false;
     862             :         }
     863             : }
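
[Editorial sketch of a hypothetical firmware-load loop — `ucode` is assumed to
be a struct amdgpu_firmware_info entry:] a true return means the VF must not
load that firmware itself:

        if (amdgpu_sriov_vf(adev) &&
            amdgpu_virt_fw_load_skip_check(adev, ucode->ucode_id))
                continue;       /* the host/PSP owns this firmware for VFs */
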
     864             : 
     865           0 : void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
     866             :                         struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
     867             :                         struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
     868             : {
     869             :         uint32_t i;
     870             : 
     871           0 :         if (!adev->virt.is_mm_bw_enabled)
     872             :                 return;
     873             : 
     874           0 :         if (encode) {
     875           0 :                 for (i = 0; i < encode_array_size; i++) {
     876           0 :                         encode[i].max_width = adev->virt.encode_max_dimension_pixels;
     877           0 :                         encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
     878           0 :                         if (encode[i].max_width > 0)
     879           0 :                                 encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
     880             :                         else
     881           0 :                                 encode[i].max_height = 0;
     882             :                 }
     883             :         }
     884             : 
     885           0 :         if (decode) {
     886           0 :                 for (i = 0; i < decode_array_size; i++) {
     887           0 :                         decode[i].max_width = adev->virt.decode_max_dimension_pixels;
     888           0 :                         decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
     889           0 :                         if (decode[i].max_width > 0)
     890           0 :                                 decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
     891             :                         else
     892           0 :                                 decode[i].max_height = 0;
     893             :                 }
     894             :         }
     895             : }
     896             : 
     897           0 : static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
     898             :                                                  u32 acc_flags, u32 hwip,
     899             :                                                  bool write, u32 *rlcg_flag)
     900             : {
     901           0 :         bool ret = false;
     902             : 
     903           0 :         switch (hwip) {
     904             :         case GC_HWIP:
     905           0 :                 if (amdgpu_sriov_reg_indirect_gc(adev)) {
     906           0 :                         *rlcg_flag =
     907           0 :                                 write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
     908           0 :                         ret = true;
      909             :                 /* only in newer versions are AMDGPU_REGS_NO_KIQ and
      910             :                  * AMDGPU_REGS_RLC enabled simultaneously */
     911           0 :                 } else if ((acc_flags & AMDGPU_REGS_RLC) &&
     912           0 :                                 !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
     913           0 :                         *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
     914           0 :                         ret = true;
     915             :                 }
     916             :                 break;
     917             :         case MMHUB_HWIP:
     918           0 :                 if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
     919           0 :                     (acc_flags & AMDGPU_REGS_RLC) && write) {
     920           0 :                         *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
     921           0 :                         ret = true;
     922             :                 }
     923             :                 break;
     924             :         default:
     925             :                 break;
     926             :         }
     927           0 :         return ret;
     928             : }
     929             : 
     930           0 : static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
     931             : {
     932             :         struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
     933           0 :         uint32_t timeout = 50000;
     934             :         uint32_t i, tmp;
     935           0 :         uint32_t ret = 0;
     936             :         void *scratch_reg0;
     937             :         void *scratch_reg1;
     938             :         void *scratch_reg2;
     939             :         void *scratch_reg3;
     940             :         void *spare_int;
     941             : 
     942           0 :         if (!adev->gfx.rlc.rlcg_reg_access_supported) {
     943           0 :                 dev_err(adev->dev,
     944             :                         "indirect registers access through rlcg is not available\n");
     945           0 :                 return 0;
     946             :         }
     947             : 
     948           0 :         reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
     949           0 :         scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
     950           0 :         scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
     951           0 :         scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
     952           0 :         scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
     953           0 :         if (reg_access_ctrl->spare_int)
     954           0 :                 spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
     955             : 
     956           0 :         if (offset == reg_access_ctrl->grbm_cntl) {
     957             :                 /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
     958           0 :                 writel(v, scratch_reg2);
     959           0 :                 writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
     960           0 :         } else if (offset == reg_access_ctrl->grbm_idx) {
     961             :                 /* if the target reg offset is grbm_idx, write to scratch_reg3 */
     962           0 :                 writel(v, scratch_reg3);
     963           0 :                 writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
     964             :         } else {
     965             :                 /*
     966             :                  * SCRATCH_REG0         = read/write value
     967             :                  * SCRATCH_REG1[30:28]  = command
     968             :                  * SCRATCH_REG1[19:0]   = address in dword
     969             :                  * SCRATCH_REG1[26:24]  = Error reporting
     970             :                  */
     971           0 :                 writel(v, scratch_reg0);
     972           0 :                 writel((offset | flag), scratch_reg1);
     973           0 :                 if (reg_access_ctrl->spare_int)
     974             :                         writel(1, spare_int);
     975             : 
     976           0 :                 for (i = 0; i < timeout; i++) {
     977           0 :                         tmp = readl(scratch_reg1);
     978           0 :                         if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
     979             :                                 break;
     980           0 :                         udelay(10);
     981             :                 }
     982             : 
     983           0 :                 if (i >= timeout) {
     984           0 :                         if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
     985           0 :                                 if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
     986           0 :                                         dev_err(adev->dev,
     987             :                                                 "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
     988           0 :                                 } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
     989           0 :                                         dev_err(adev->dev,
     990             :                                                 "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
     991           0 :                                 } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
     992           0 :                                         dev_err(adev->dev,
     993             :                                                 "register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
     994             :                                 } else {
     995           0 :                                         dev_err(adev->dev,
     996             :                                                 "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
     997             :                                 }
     998             :                         } else {
     999           0 :                                 dev_err(adev->dev,
     1000             :                                         "timeout: rlcg failed to program reg: 0x%05x\n", offset);
    1001             :                         }
    1002             :                 }
    1003             :         }
    1004             : 
    1005           0 :         ret = readl(scratch_reg0);
    1006           0 :         return ret;
    1007             : }
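
[Editorial walk-through of the scratch-register handshake above — 0x1234 is a
made-up dword address:] to write value v through RLCG on the GC block:

        SCRATCH_REG0 <= v                               /* payload */
        SCRATCH_REG1 <= 0x1234 | AMDGPU_RLCG_GC_WRITE   /* address[19:0] | command[30:28] */
        SPARE_INT    <= 1                               /* notify the RLC */
        poll SCRATCH_REG1 until the address bits [19:0] clear, then (for reads)
        fetch the result from SCRATCH_REG0.
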
    1008             : 
    1009           0 : void amdgpu_sriov_wreg(struct amdgpu_device *adev,
    1010             :                        u32 offset, u32 value,
    1011             :                        u32 acc_flags, u32 hwip)
    1012             : {
    1013             :         u32 rlcg_flag;
    1014             : 
    1015           0 :         if (!amdgpu_sriov_runtime(adev) &&
    1016           0 :                 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
    1017           0 :                 amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
    1018           0 :                 return;
    1019             :         }
    1020             : 
    1021           0 :         if (acc_flags & AMDGPU_REGS_NO_KIQ)
    1022           0 :                 WREG32_NO_KIQ(offset, value);
    1023             :         else
    1024           0 :                 WREG32(offset, value);
    1025             : }
    1026             : 
    1027           0 : u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
    1028             :                       u32 offset, u32 acc_flags, u32 hwip)
    1029             : {
    1030             :         u32 rlcg_flag;
    1031             : 
    1032           0 :         if (!amdgpu_sriov_runtime(adev) &&
    1033           0 :                 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
    1034           0 :                 return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
    1035             : 
    1036           0 :         if (acc_flags & AMDGPU_REGS_NO_KIQ)
    1037           0 :                 return RREG32_NO_KIQ(offset);
    1038             :         else
    1039           0 :                 return RREG32(offset);
    1040             : }
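
[Editorial direct-use sketch — in practice these entry points are normally
reached through the driver's register-access macros rather than called by
hand:]

        amdgpu_sriov_wreg(adev, offset, value, AMDGPU_REGS_RLC, GC_HWIP);
        value = amdgpu_sriov_rreg(adev, offset, AMDGPU_REGS_RLC, GC_HWIP);
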

Generated by: LCOV version 1.14