LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/amdgpu - amdgpu_ring.h (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 33 0.0 %
Date: 2022-12-09 01:23:36 Functions: 0 2 0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2016 Advanced Micro Devices, Inc.
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice shall be included in
      12             :  * all copies or substantial portions of the Software.
      13             :  *
      14             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      15             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      16             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      17             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      18             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      19             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      20             :  * OTHER DEALINGS IN THE SOFTWARE.
      21             :  *
      22             :  * Authors: Christian König
      23             :  */
      24             : #ifndef __AMDGPU_RING_H__
      25             : #define __AMDGPU_RING_H__
      26             : 
      27             : #include <drm/amdgpu_drm.h>
      28             : #include <drm/gpu_scheduler.h>
      29             : #include <drm/drm_print.h>
      30             : 
      31             : struct amdgpu_device;
      32             : struct amdgpu_ring;
      33             : struct amdgpu_ib;
      34             : struct amdgpu_cs_parser;
      35             : struct amdgpu_job;
      36             : struct amdgpu_vm;
      37             : 
      38             : /* max number of rings */
      39             : #define AMDGPU_MAX_RINGS                28
      40             : #define AMDGPU_MAX_HWIP_RINGS           8
      41             : #define AMDGPU_MAX_GFX_RINGS            2
      42             : #define AMDGPU_MAX_COMPUTE_RINGS        8
      43             : #define AMDGPU_MAX_VCE_RINGS            3
      44             : #define AMDGPU_MAX_UVD_ENC_RINGS        2
      45             : 
/*
 * Software ring priority levels.
 *
 * NOTE: AMDGPU_RING_PRIO_1 and AMDGPU_RING_PRIO_DEFAULT both have the
 * value 1 — the default priority is deliberately an alias for level 1,
 * not a distinct level.
 */
enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,	/* alias of AMDGPU_RING_PRIO_1 */
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};
      53             : 
      54             : /* some special values for the owner field */
      55             : #define AMDGPU_FENCE_OWNER_UNDEFINED    ((void *)0ul)
      56             : #define AMDGPU_FENCE_OWNER_VM           ((void *)1ul)
      57             : #define AMDGPU_FENCE_OWNER_KFD          ((void *)2ul)
      58             : 
      59             : #define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
      60             : #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
      61             : #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
      62             : 
      63             : #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
      64             : 
      65             : #define AMDGPU_IB_POOL_SIZE     (1024 * 1024)
      66             : 
/*
 * Ring types.
 *
 * The first entries are defined to be equal to the UAPI hardware IP
 * types from amdgpu_drm.h so the two can be used interchangeably.
 * KIQ and MES have no UAPI equivalent and are kernel-internal only.
 */
enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_KIQ,		/* kernel-internal: Kernel Interface Queue */
	AMDGPU_RING_TYPE_MES		/* kernel-internal: MicroEngine Scheduler */
};
      80             : 
/*
 * IB suballocation pools, selected per submission depending on where in
 * the pipeline the work must land.
 */
enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};
      91             : 
/*
 * An indirect buffer (IB): a chunk of command dwords suballocated from
 * an IB pool and executed by a ring (see amdgpu_ib_get()/amdgpu_ib_free()
 * and amdgpu_ib_schedule() below).
 */
struct amdgpu_ib {
	struct amdgpu_sa_bo		*sa_bo;		/* suballocation backing this IB */
	uint32_t			length_dw;	/* used length of the IB, in dwords */
	uint64_t			gpu_addr;	/* GPU address of the IB contents */
	uint32_t			*ptr;		/* CPU pointer to the IB dwords (see amdgpu_ib_get/set_value) */
	uint32_t			flags;		/* presumably AMDGPU_IB_FLAG_* — TODO confirm against amdgpu_drm.h */
};
      99             : 
/*
 * A group of GPU schedulers; sched[] holds up to one entry per HW IP
 * ring, with num_scheds giving the count of valid entries.
 */
struct amdgpu_sched {
	u32				num_scheds;	/* number of valid entries in sched[] */
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};
     104             : 
     105             : /*
     106             :  * Fences.
     107             :  */
/*
 * Per-ring fence driver state: tracks emitted vs. signalled fence
 * sequence numbers and the interrupt source used for completion.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;	/* GPU address of the fence value */
	volatile uint32_t		*cpu_addr;	/* CPU view of the fence value */
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;	/* last emitted sequence number */
	atomic_t			last_seq;	/* last processed/signalled sequence number */
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;	/* interrupt source for fence completion */
	unsigned			irq_type;
	struct timer_list		fallback_timer;	/* NOTE(review): presumably polls when IRQs are missed — confirm */
	unsigned			num_fences_mask;	/* mask for indexing into fences[] */
	spinlock_t			lock;		/* protects fences[] — TODO confirm exact scope */
	struct dma_fence		**fences;	/* in-flight fence ring, num_fences_mask + 1 slots */
};
     122             : 
     123             : extern const struct drm_sched_backend_ops amdgpu_sched_ops;
     124             : 
     125             : void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
     126             : void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
     127             : 
     128             : int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
     129             : int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
     130             :                                    struct amdgpu_irq_src *irq_src,
     131             :                                    unsigned irq_type);
     132             : void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
     133             : void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
     134             : int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
     135             : void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
     136             : int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
     137             :                       unsigned flags);
     138             : int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
     139             :                               uint32_t timeout);
     140             : bool amdgpu_fence_process(struct amdgpu_ring *ring);
     141             : int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
     142             : signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
     143             :                                       uint32_t wait_seq,
     144             :                                       signed long timeout);
     145             : unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
     146             : void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);
     147             : 
     148             : /*
     149             :  * Rings.
     150             :  */
     151             : 
     152             : /* provided by hw blocks that expose a ring buffer for commands */
/*
 * amdgpu_ring_funcs - per-IP vtable provided by hw blocks that expose a
 * ring buffer for commands. The amdgpu_ring_* macros below dispatch
 * through these hooks; not every IP implements every hook.
 */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;	/* required alignment of submissions, in dwords */
	u32			nop;		/* NOP packet value used for padding/clearing */
	bool			support_64bit_ptrs;	/* rptr/wptr are 64-bit on this IP */
	bool			no_user_fence;
	bool			secure_submission_supported;
	unsigned		vmhub;		/* VM hub this ring belongs to — TODO confirm index semantics */
	unsigned		extra_dw;	/* extra dwords of ring space — TODO confirm usage */

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
	/* register access from the ring */
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
};
     224             : 
/*
 * amdgpu_ring - state for one hardware command ring.
 *
 * The ring buffer itself is a power-of-two array of dwords; buf_mask
 * (size - 1) masks indices into it and ptr_mask wraps the write pointer
 * (see amdgpu_ring_write() below). count_dw counts how many dwords of
 * reserved space remain for the current submission.
 */
struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;		/* per-IP vtable, see above */
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;		/* embedded: to_amdgpu_ring() recovers the ring */

	struct amdgpu_bo	*ring_obj;	/* BO backing the ring buffer */
	volatile uint32_t	*ring;		/* CPU view of the ring buffer dwords */
	unsigned		rptr_offs;
	u64			rptr_gpu_addr;
	volatile u32		*rptr_cpu_addr;
	u64			wptr;		/* current write pointer, wrapped by ptr_mask */
	u64			wptr_old;
	unsigned		ring_size;	/* ring buffer size in bytes — TODO confirm units */
	unsigned		max_dw;
	int			count_dw;	/* dwords still reserved for the current submission */
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;	/* wrap mask for wptr */
	uint32_t		buf_mask;	/* index mask for ring[] (buffer size in dw - 1) */
	u32			idx;
	u32			me;		/* micro-engine / pipe / queue ids — TODO confirm */
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;	/* memory queue descriptor BO and mappings */
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	u64			wptr_gpu_addr;
	volatile u32		*wptr_cpu_addr;
	unsigned		fence_offs;
	u64			fence_gpu_addr;
	volatile u32		*fence_cpu_addr;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;	/* written by amdgpu_ring_set_preempt_cond_exec() */
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	int			hw_prio;
	unsigned		num_hw_submission;
	atomic_t		*sched_score;

	/* used for mes */
	bool			is_mes_queue;
	uint32_t		hw_queue_id;
	struct amdgpu_mes_ctx_data *mes_ctx;
};
     283             : 
     284             : #define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
     285             : #define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
     286             : #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
     287             : #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
     288             : #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
     289             : #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
     290             : #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
     291             : #define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
     292             : #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
     293             : #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
     294             : #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
     295             : #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
     296             : #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
     297             : #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
     298             : #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
     299             : #define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
     300             : #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
     301             : #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
     302             : #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
     303             : #define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
     304             : #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
     305             : #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
     306             : #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
     307             : #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
     308             : 
     309             : int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
     310             : void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
     311             : void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
     312             : void amdgpu_ring_commit(struct amdgpu_ring *ring);
     313             : void amdgpu_ring_undo(struct amdgpu_ring *ring);
     314             : int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
     315             :                      unsigned int max_dw, struct amdgpu_irq_src *irq_src,
     316             :                      unsigned int irq_type, unsigned int hw_prio,
     317             :                      atomic_t *sched_score);
     318             : void amdgpu_ring_fini(struct amdgpu_ring *ring);
     319             : void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
     320             :                                                 uint32_t reg0, uint32_t val0,
     321             :                                                 uint32_t reg1, uint32_t val1);
     322             : bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
     323             :                                struct dma_fence *fence);
     324             : 
     325             : static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
     326             :                                                         bool cond_exec)
     327             : {
     328           0 :         *ring->cond_exe_cpu_addr = cond_exec;
     329             : }
     330             : 
     331             : static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
     332             : {
     333           0 :         int i = 0;
     334           0 :         while (i <= ring->buf_mask)
     335           0 :                 ring->ring[i++] = ring->funcs->nop;
     336             : 
     337             : }
     338             : 
     339           0 : static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
     340             : {
     341           0 :         if (ring->count_dw <= 0)
     342           0 :                 DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
     343           0 :         ring->ring[ring->wptr++ & ring->buf_mask] = v;
     344           0 :         ring->wptr &= ring->ptr_mask;
     345           0 :         ring->count_dw--;
     346           0 : }
     347             : 
/*
 * Copy count_dw dwords from src into the ring buffer at the current
 * write pointer, splitting the copy in two when it wraps past the end
 * of the buffer, then advance wptr and the free-space counter.
 */
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	/* Overflow is reported but not prevented; the caller must have
	 * reserved enough space with amdgpu_ring_alloc(). */
	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	occupied = ring->wptr & ring->buf_mask;	/* current dword offset in the buffer */
	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;	/* dwords left before the end of the buffer */
	chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
	chunk2 = count_dw - chunk1;		/* remainder that wraps to the start */
	/* convert both dword counts to bytes for memcpy() */
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		/* wrapped part: continue from the start of the buffer */
		src += chunk1;
		dst = (void *)ring->ring;
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}
     378             : 
     379             : #define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset)                  \
     380             :         (ring->is_mes_queue && ring->mes_ctx ?                            \
     381             :          (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)
     382             : 
     383             : #define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset)                  \
     384             :         (ring->is_mes_queue && ring->mes_ctx ?                            \
     385             :          (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
     386             :          NULL)
     387             : 
     388             : int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
     389             : 
     390             : void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
     391             :                               struct amdgpu_ring *ring);
     392             : 
     393             : int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);
     394             : 
     395             : static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
     396             : {
     397           0 :         return ib->ptr[idx];
     398             : }
     399             : 
     400             : static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
     401             :                                        uint32_t value)
     402             : {
     403           0 :         ib->ptr[idx] = value;
     404             : }
     405             : 
     406             : int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
     407             :                   unsigned size,
     408             :                   enum amdgpu_ib_pool_type pool,
     409             :                   struct amdgpu_ib *ib);
     410             : void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
     411             :                     struct dma_fence *f);
     412             : int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
     413             :                        struct amdgpu_ib *ibs, struct amdgpu_job *job,
     414             :                        struct dma_fence **f);
     415             : int amdgpu_ib_pool_init(struct amdgpu_device *adev);
     416             : void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
     417             : int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
     418             : 
     419             : #endif

Generated by: LCOV version 1.14