/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU-accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written.  When the
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr.  The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */

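/*
 * Illustrative sketch (not part of the driver): the rptr/wptr model
 * above in code form.  Ring sizes are powers of two, so masking with
 * ptr_mask handles wraparound:
 *
 *   idle    = (ring->wptr & ring->ptr_mask) ==
 *             (amdgpu_ring_get_rptr(ring) & ring->ptr_mask);
 *   used_dw = (ring->wptr - amdgpu_ring_get_rptr(ring)) & ring->ptr_mask;
 */
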
/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
{
	/* Round the requested size up to the ring's alignment so that
	 * amdgpu_ring_commit() can pad safely */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Make sure we aren't trying to allocate more space
	 * than the maximum for one submission
	 */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}

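/*
 * Worked example (illustrative): with align_mask = 0x7 (8-dword fetch
 * granularity), a request for 10 dwords rounds up as
 *
 *   ndw = (10 + 7) & ~7 = 16
 *
 * which reserves the room that amdgpu_ring_commit() later fills with
 * NOP padding.
 */
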
/**
 * amdgpu_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @count: the number of NOP packets to insert
 *
 * This is the generic insert_nop function for rings except SDMA
 */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: IB to add NOP packets to
 *
 * This is the generic pad_ib function for rings except SDMA
 */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}

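/*
 * Worked example (illustrative): with align_mask = 0x7, an IB whose
 * length_dw is 13 gets three NOP dwords appended, so length_dw ends at
 * 16 and the engine never fetches past valid packets.
 */
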
/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match fetch size */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

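/*
 * Typical submission flow (illustrative sketch; "packet" is a
 * placeholder, and real callers such as the fence and IB paths add
 * more bookkeeping around it):
 *
 *   r = amdgpu_ring_alloc(ring, 16);
 *   if (r)
 *           return r;
 *   amdgpu_ring_write(ring, packet);  // emit packets
 *   amdgpu_ring_commit(ring);         // pad, barrier, publish new wptr
 */
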
/**
 * amdgpu_ring_undo - reset the wptr
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}

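/*
 * Error-path sketch (illustrative; build_packets() is a placeholder,
 * not a real helper): undo discards everything written since the
 * matching alloc, so a failed construction never reaches the GPU:
 *
 *   r = amdgpu_ring_alloc(ring, ndw);
 *   if (r)
 *           return r;
 *   if (build_packets(ring))
 *           amdgpu_ring_undo(ring);
 *   else
 *           amdgpu_ring_commit(ring);
 */
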
#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) :			\
	 (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 (&ring->adev->wb.wb[offset]))

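/*
 * Unit note with a worked expansion (illustrative): for the writeback
 * path @offset is an index of 32-bit slots, hence the "* 4" on the GPU
 * side and the direct array index on the CPU side; for MES queues it
 * is a byte offset into the metadata BO.  E.g. with offset = 5 on the
 * writeback path:
 *
 *   amdgpu_ring_get_gpu_addr(ring, 5) -> adev->wb.gpu_addr + 20
 *   amdgpu_ring_get_cpu_addr(ring, 5) -> &adev->wb.wb[5]
 */
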
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int r;
	int sched_hw_submission = amdgpu_sched_hw_submission;
	u32 *num_sched;
	u32 hw_ip;

	/* Set the hw submission limit higher for KIQ because
	 * it's used for a number of gfx/compute tasks by both
	 * KFD and KGD which may have outstanding fences and
	 * it doesn't really use the gpu scheduler anyway;
	 * KIQ tasks get submitted directly to the ring.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;

	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		if (!ring->is_mes_queue) {
			ring->idx = adev->num_rings++;
			adev->rings[ring->idx] = ring;
		}

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}

	if (ring->is_mes_queue) {
		ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_RPTR_OFFS);
		ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_WPTR_OFFS);
		ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_FENCE_OFFS);
		ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
		ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_COND_EXE_OFFS);
	} else {
		r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
			return r;
		}

		r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring cond_exe_offs wb alloc failed\n", r);
			return r;
		}
	}

	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;

	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}

	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;

	/* Allocate ring buffer */
	if (ring->is_mes_queue) {
		int offset = 0;

		BUG_ON(ring->ring_size > PAGE_SIZE * 4);

		offset = amdgpu_mes_ctx_get_offs(ring,
					 AMDGPU_MES_CTX_RING_OFFS);
		ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		amdgpu_ring_clear_ring(ring);

	} else if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}

	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}

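/*
 * Caller sketch (illustrative; the irq source and ring come from the
 * surrounding IP block, not this file — the constants shown are the
 * gfx EOP interrupt and the default ring priority): an engine's
 * sw_init typically registers its ring along these lines:
 *
 *   r = amdgpu_ring_init(adev, ring, 1024, irq_src,
 *                        AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
 *                        AMDGPU_RING_PRIO_DEFAULT, NULL);
 *   if (r)
 *           return r;
 */
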
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Don't tear down a ring that was never initialized */
	if (!(ring->adev) ||
	    (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
		return;

	ring->sched.ready = false;

	if (!ring->is_mes_queue) {
		amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->wptr_offs);

		amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
		amdgpu_device_wb_free(ring->adev, ring->fence_offs);

		amdgpu_bo_free_kernel(&ring->ring_obj,
				      &ring->gpu_addr,
				      (void **)&ring->ring);
	}

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	if (!ring->is_mes_queue)
		ring->adev->rings[ring->idx] = NULL;
}

/**
 * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 *
 * @ring: ring to write to
 * @reg0: register to write
 * @reg1: register to wait on
 * @ref: reference value to write/wait on
 * @mask: mask to wait on
 *
 * Helper for rings that don't support write and wait in a
 * single oneshot packet.
 */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

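/*
 * Illustrative expansion (mmFOO and mmFOO_STATUS are placeholders,
 * not real registers):
 *
 *   amdgpu_ring_emit_reg_write_reg_wait_helper(ring, mmFOO,
 *                                              mmFOO_STATUS, 1, 1);
 *
 * emits a write of 1 to mmFOO, then polls until
 * (mmFOO_STATUS & 0x1) == 0x1.
 */
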
/**
 * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 *
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timed out fence
 *
 * Tries to get a ring proceeding again when it is stuck.
 */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10000);

	if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
		return false;

	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);
	while (!dma_fence_is_signaled(fence) &&
	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n dwords of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	int r, i;
	uint32_t value, result, early[3];

	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12) / 4];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}

static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};

#endif

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
				 &amdgpu_debugfs_ring_fops,
				 ring->ring_size + 12);

#endif
}

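/*
 * Reading the blob from userspace (illustrative; the exact debugfs
 * path depends on the DRM minor number and ring name):
 *
 *   od -A d -t x4 /sys/kernel/debug/dri/0/amdgpu_ring_gfx | head
 *
 * The first three dwords are rptr, wptr and the driver's wptr copy;
 * everything from byte 12 onward is raw ring contents.
 */
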
/**
 * amdgpu_ring_test_helper - test the ring and set sched readiness status
 *
 * @ring: ring to test
 *
 * Test the ring and set the scheduler readiness status accordingly.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;
	return r;
}

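/*
 * Illustrative usage: IP blocks usually call this from their
 * hw_init/resume hooks so the scheduler only accepts jobs on rings
 * that passed their test:
 *
 *   r = amdgpu_ring_test_helper(ring);
 *   if (r)
 *           return r;
 */
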
static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;

	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs to set this field.
	 */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
	     amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
	     amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
		prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
		prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}
}

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);

	ring->wptr = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}
