LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/amdgpu - amdgpu_ids.c
Test: coverage.info
Date: 2022-12-09 01:23:36

                 Hit    Total    Coverage
Lines:             0      211      0.0 %
Functions:         0       15      0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2017 Advanced Micro Devices, Inc.
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice shall be included in
      12             :  * all copies or substantial portions of the Software.
      13             :  *
      14             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      15             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      16             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      17             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      18             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      19             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      20             :  * OTHER DEALINGS IN THE SOFTWARE.
      21             :  *
      22             :  */
      23             : #include "amdgpu_ids.h"
      24             : 
      25             : #include <linux/idr.h>
      26             : #include <linux/dma-fence-array.h>
      27             : 
      28             : 
      29             : #include "amdgpu.h"
      30             : #include "amdgpu_trace.h"
      31             : 
      32             : /*
      33             :  * PASID manager
      34             :  *
      35             :  * PASIDs are global address space identifiers that can be shared
      36             :  * between the GPU, an IOMMU and the driver. VMs on different devices
      37             :  * may use the same PASID if they share the same address
      38             :  * space. Therefore PASIDs are allocated using a global IDA. VMs are
      39             :  * looked up from the PASID per amdgpu_device.
      40             :  */
      41             : static DEFINE_IDA(amdgpu_pasid_ida);
      42             : 
      43             : /* Helper to free pasid from a fence callback */
      44             : struct amdgpu_pasid_cb {
      45             :         struct dma_fence_cb cb;
      46             :         u32 pasid;
      47             : };
      48             : 
      49             : /**
      50             :  * amdgpu_pasid_alloc - Allocate a PASID
      51             :  * @bits: Maximum width of the PASID in bits, must be at least 1
      52             :  *
      53             :  * Allocates a PASID of the given width while keeping smaller PASIDs
      54             :  * available if possible.
      55             :  *
      56             :  * Returns a positive integer on success. Returns %-EINVAL if bits==0.
      57             :  * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
      58             :  * memory allocation failure.
      59             :  */
      60           0 : int amdgpu_pasid_alloc(unsigned int bits)
      61             : {
      62           0 :         int pasid = -EINVAL;
      63             : 
      64           0 :         for (bits = min(bits, 31U); bits > 0; bits--) {
      65           0 :                 pasid = ida_simple_get(&amdgpu_pasid_ida,
      66             :                                        1U << (bits - 1), 1U << bits,
      67             :                                        GFP_KERNEL);
      68           0 :                 if (pasid != -ENOSPC)
      69             :                         break;
      70             :         }
      71             : 
      72             :         if (pasid >= 0)
      73             :                 trace_amdgpu_pasid_allocated(pasid);
      74             : 
      75           0 :         return pasid;
      76             : }
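
/* A minimal sketch, not part of the instrumented file above: a
 * hypothetical caller exercising amdgpu_pasid_alloc()/amdgpu_pasid_free().
 * The helper name is made up; the error convention follows the kerneldoc
 * (-EINVAL, -ENOSPC or -ENOMEM come back as negative values).
 */
static int example_pasid_roundtrip(void)
{
        int pasid = amdgpu_pasid_alloc(16);

        if (pasid < 0)
                return pasid;

        /* ... bind the PASID to a VM or IOMMU context here ... */

        amdgpu_pasid_free((u32)pasid);
        return 0;
}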
      77             : 
      78             : /**
      79             :  * amdgpu_pasid_free - Free a PASID
      80             :  * @pasid: PASID to free
      81             :  */
      82           0 : void amdgpu_pasid_free(u32 pasid)
      83             : {
      84           0 :         trace_amdgpu_pasid_freed(pasid);
      85           0 :         ida_simple_remove(&amdgpu_pasid_ida, pasid);
      86           0 : }
      87             : 
      88           0 : static void amdgpu_pasid_free_cb(struct dma_fence *fence,
      89             :                                  struct dma_fence_cb *_cb)
      90             : {
      91           0 :         struct amdgpu_pasid_cb *cb =
      92           0 :                 container_of(_cb, struct amdgpu_pasid_cb, cb);
      93             : 
      94           0 :         amdgpu_pasid_free(cb->pasid);
      95           0 :         dma_fence_put(fence);
      96           0 :         kfree(cb);
      97           0 : }
      98             : 
      99             : /**
     100             :  * amdgpu_pasid_free_delayed - free pasid when fences signal
     101             :  *
     102             :  * @resv: reservation object with the fences to wait for
     103             :  * @pasid: pasid to free
     104             :  *
     105             :  * Free the pasid only after all the fences in resv are signaled.
     106             :  */
     107           0 : void amdgpu_pasid_free_delayed(struct dma_resv *resv,
     108             :                                u32 pasid)
     109             : {
     110             :         struct amdgpu_pasid_cb *cb;
     111             :         struct dma_fence *fence;
     112             :         int r;
     113             : 
     114           0 :         r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
     115           0 :         if (r)
     116             :                 goto fallback;
     117             : 
     118           0 :         if (!fence) {
     119             :                 amdgpu_pasid_free(pasid);
     120           0 :                 return;
     121             :         }
     122             : 
     123           0 :         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
     124           0 :         if (!cb) {
     125             :                 /* Last resort when we are OOM */
     126           0 :                 dma_fence_wait(fence, false);
     127           0 :                 dma_fence_put(fence);
     128             :                 amdgpu_pasid_free(pasid);
     129             :         } else {
     130           0 :                 cb->pasid = pasid;
     131           0 :                 if (dma_fence_add_callback(fence, &cb->cb,
     132             :                                            amdgpu_pasid_free_cb))
     133           0 :                         amdgpu_pasid_free_cb(fence, &cb->cb);
     134             :         }
     135             : 
     136             :         return;
     137             : 
     138             : fallback:
      139             :         /* Not enough memory for the delayed delete; as a last resort,
      140             :          * block until all the fences complete.
     141             :          */
     142           0 :         dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
     143             :                               false, MAX_SCHEDULE_TIMEOUT);
     144           0 :         amdgpu_pasid_free(pasid);
     145             : }
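
/* A minimal sketch, assuming the usual amdgpu_vm/amdgpu_bo layout
 * (vm->root.bo->tbo.base.resv): a hypothetical teardown path that
 * releases the PASID only once every bookkeeping fence on the root
 * page-directory BO has signaled.
 */
static void example_vm_teardown(struct amdgpu_vm *vm)
{
        amdgpu_pasid_free_delayed(vm->root.bo->tbo.base.resv, vm->pasid);
}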
     146             : 
     147             : /*
     148             :  * VMID manager
     149             :  *
      150             :  * VMIDs are per-VMHUB identifiers used for page table handling.
     151             :  */
     152             : 
     153             : /**
      154             :  * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
     155             :  *
     156             :  * @adev: amdgpu_device pointer
     157             :  * @id: VMID structure
     158             :  *
      159             :  * Check if a GPU reset occurred since the last use of the VMID.
     160             :  */
     161           0 : bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
     162             :                                struct amdgpu_vmid *id)
     163             : {
     164           0 :         return id->current_gpu_reset_count !=
     165           0 :                 atomic_read(&adev->gpu_reset_counter);
     166             : }
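
/* A minimal sketch of a typical consumer of this check (the caller is
 * hypothetical): a GPU reset invalidates any cached VMID state, so a
 * flush path forces a full flush whenever the reset counter has moved.
 */
static void example_note_gpu_reset(struct amdgpu_device *adev,
                                   struct amdgpu_vmid *id,
                                   struct amdgpu_job *job)
{
        if (amdgpu_vmid_had_gpu_reset(adev, id))
                job->vm_needs_flush = true;
}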
     167             : 
     168             : /**
     169             :  * amdgpu_vmid_grab_idle - grab idle VMID
     170             :  *
     171             :  * @vm: vm to allocate id for
     172             :  * @ring: ring we want to submit job to
     173             :  * @sync: sync object where we add dependencies
     174             :  * @idle: resulting idle VMID
     175             :  *
      176             :  * Try to find an idle VMID; if none is idle, add a fence to wait on to the
      177             :  * sync object. Returns -ENOMEM when we are out of memory.
     178             :  */
     179           0 : static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
     180             :                                  struct amdgpu_ring *ring,
     181             :                                  struct amdgpu_sync *sync,
     182             :                                  struct amdgpu_vmid **idle)
     183             : {
     184           0 :         struct amdgpu_device *adev = ring->adev;
     185           0 :         unsigned vmhub = ring->funcs->vmhub;
     186           0 :         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     187             :         struct dma_fence **fences;
     188             :         unsigned i;
     189             :         int r;
     190             : 
     191           0 :         if (!dma_fence_is_signaled(ring->vmid_wait))
     192           0 :                 return amdgpu_sync_fence(sync, ring->vmid_wait);
     193             : 
     194           0 :         fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
     195           0 :         if (!fences)
     196             :                 return -ENOMEM;
     197             : 
     198             :         /* Check if we have an idle VMID */
     199           0 :         i = 0;
     200           0 :         list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
      201             :                 /* Don't use per-engine and per-process VMIDs at the same time */
     202           0 :                 struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
     203           0 :                         NULL : ring;
     204             : 
     205           0 :                 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
     206           0 :                 if (!fences[i])
     207             :                         break;
     208           0 :                 ++i;
     209             :         }
     210             : 
      211             :         /* If we can't find an idle VMID to use, wait until one becomes available */
     212           0 :         if (&(*idle)->list == &id_mgr->ids_lru) {
     213           0 :                 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
     214           0 :                 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
     215             :                 struct dma_fence_array *array;
     216             :                 unsigned j;
     217             : 
     218           0 :                 *idle = NULL;
     219           0 :                 for (j = 0; j < i; ++j)
     220           0 :                         dma_fence_get(fences[j]);
     221             : 
     222           0 :                 array = dma_fence_array_create(i, fences, fence_context,
     223             :                                                seqno, true);
     224           0 :                 if (!array) {
     225           0 :                         for (j = 0; j < i; ++j)
     226           0 :                                 dma_fence_put(fences[j]);
     227           0 :                         kfree(fences);
     228             :                         return -ENOMEM;
     229             :                 }
     230             : 
     231           0 :                 r = amdgpu_sync_fence(sync, &array->base);
     232           0 :                 dma_fence_put(ring->vmid_wait);
     233           0 :                 ring->vmid_wait = &array->base;
     234             :                 return r;
     235             :         }
     236           0 :         kfree(fences);
     237             : 
     238             :         return 0;
     239             : }
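
/* The composite-fence idiom above, isolated as a sketch with
 * placeholder names. Note the final 'true' (signal_on_any): the array
 * signals as soon as ANY member fence does, since a single VMID
 * becoming idle is enough to unblock the submission.
 */
static struct dma_fence *example_wait_for_any_idle(struct dma_fence **fences,
                                                   unsigned count, u64 context,
                                                   unsigned seqno)
{
        struct dma_fence_array *array;

        array = dma_fence_array_create(count, fences, context, seqno, true);
        /* On success the array owns the fence references and frees the
         * fences[] storage itself; on failure the caller still owns
         * both and must clean them up, as the function above does.
         */
        return array ? &array->base : NULL;
}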
     240             : 
     241             : /**
     242             :  * amdgpu_vmid_grab_reserved - try to assign reserved VMID
     243             :  *
     244             :  * @vm: vm to allocate id for
     245             :  * @ring: ring we want to submit job to
     246             :  * @sync: sync object where we add dependencies
     247             :  * @fence: fence protecting ID from reuse
      248             :  * @job: job that wants to use the VMID
     249             :  * @id: resulting VMID
     250             :  *
     251             :  * Try to assign a reserved VMID.
     252             :  */
     253           0 : static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
     254             :                                      struct amdgpu_ring *ring,
     255             :                                      struct amdgpu_sync *sync,
     256             :                                      struct dma_fence *fence,
     257             :                                      struct amdgpu_job *job,
     258             :                                      struct amdgpu_vmid **id)
     259             : {
     260           0 :         struct amdgpu_device *adev = ring->adev;
     261           0 :         unsigned vmhub = ring->funcs->vmhub;
     262           0 :         uint64_t fence_context = adev->fence_context + ring->idx;
     263           0 :         bool needs_flush = vm->use_cpu_for_update;
     264           0 :         uint64_t updates = amdgpu_vm_tlb_seq(vm);
     265             :         int r;
     266             : 
     267           0 :         *id = vm->reserved_vmid[vmhub];
     268           0 :         if ((*id)->owner != vm->immediate.fence_context ||
     269           0 :             (*id)->pd_gpu_addr != job->vm_pd_addr ||
     270           0 :             (*id)->flushed_updates < updates ||
     271           0 :             !(*id)->last_flush ||
     272           0 :             ((*id)->last_flush->context != fence_context &&
     273           0 :              !dma_fence_is_signaled((*id)->last_flush))) {
     274             :                 struct dma_fence *tmp;
     275             : 
      276             :                 /* Don't use per-engine and per-process VMIDs at the same time */
     277           0 :                 if (adev->vm_manager.concurrent_flush)
     278           0 :                         ring = NULL;
     279             : 
      280             :                 /* to prevent one context being starved by another */
     281           0 :                 (*id)->pd_gpu_addr = 0;
     282           0 :                 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
     283           0 :                 if (tmp) {
     284           0 :                         *id = NULL;
     285           0 :                         return amdgpu_sync_fence(sync, tmp);
     286             :                 }
     287             :                 needs_flush = true;
     288             :         }
     289             : 
      290             :         /* Good, we can use this VMID. Remember this submission as
      291             :          * user of the VMID.
      292             :          */
     293           0 :         r = amdgpu_sync_fence(&(*id)->active, fence);
     294           0 :         if (r)
     295             :                 return r;
     296             : 
     297           0 :         (*id)->flushed_updates = updates;
     298           0 :         job->vm_needs_flush = needs_flush;
     299           0 :         return 0;
     300             : }
     301             : 
     302             : /**
     303             :  * amdgpu_vmid_grab_used - try to reuse a VMID
     304             :  *
     305             :  * @vm: vm to allocate id for
     306             :  * @ring: ring we want to submit job to
     307             :  * @sync: sync object where we add dependencies
     308             :  * @fence: fence protecting ID from reuse
      309             :  * @job: job that wants to use the VMID
     310             :  * @id: resulting VMID
     311             :  *
     312             :  * Try to reuse a VMID for this submission.
     313             :  */
     314           0 : static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
     315             :                                  struct amdgpu_ring *ring,
     316             :                                  struct amdgpu_sync *sync,
     317             :                                  struct dma_fence *fence,
     318             :                                  struct amdgpu_job *job,
     319             :                                  struct amdgpu_vmid **id)
     320             : {
     321           0 :         struct amdgpu_device *adev = ring->adev;
     322           0 :         unsigned vmhub = ring->funcs->vmhub;
     323           0 :         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     324           0 :         uint64_t fence_context = adev->fence_context + ring->idx;
     325           0 :         uint64_t updates = amdgpu_vm_tlb_seq(vm);
     326             :         int r;
     327             : 
     328           0 :         job->vm_needs_flush = vm->use_cpu_for_update;
     329             : 
     330             :         /* Check if we can use a VMID already assigned to this VM */
     331           0 :         list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
     332           0 :                 bool needs_flush = vm->use_cpu_for_update;
     333             : 
     334             :                 /* Check all the prerequisites to using this VMID */
     335           0 :                 if ((*id)->owner != vm->immediate.fence_context)
     336           0 :                         continue;
     337             : 
     338           0 :                 if ((*id)->pd_gpu_addr != job->vm_pd_addr)
     339           0 :                         continue;
     340             : 
     341           0 :                 if (!(*id)->last_flush ||
     342           0 :                     ((*id)->last_flush->context != fence_context &&
     343           0 :                      !dma_fence_is_signaled((*id)->last_flush)))
     344             :                         needs_flush = true;
     345             : 
     346           0 :                 if ((*id)->flushed_updates < updates)
     347           0 :                         needs_flush = true;
     348             : 
     349           0 :                 if (needs_flush && !adev->vm_manager.concurrent_flush)
     350           0 :                         continue;
     351             : 
     352             :                 /* Good, we can use this VMID. Remember this submission as
     353             :                  * user of the VMID.
     354             :                  */
     355           0 :                 r = amdgpu_sync_fence(&(*id)->active, fence);
     356           0 :                 if (r)
     357             :                         return r;
     358             : 
     359           0 :                 (*id)->flushed_updates = updates;
     360           0 :                 job->vm_needs_flush |= needs_flush;
     361             :                 return 0;
     362             :         }
     363             : 
     364           0 :         *id = NULL;
     365             :         return 0;
     366             : }
     367             : 
     368             : /**
     369             :  * amdgpu_vmid_grab - allocate the next free VMID
     370             :  *
     371             :  * @vm: vm to allocate id for
     372             :  * @ring: ring we want to submit job to
     373             :  * @sync: sync object where we add dependencies
     374             :  * @fence: fence protecting ID from reuse
      375             :  * @job: job that wants to use the VMID
     376             :  *
     377             :  * Allocate an id for the vm, adding fences to the sync obj as necessary.
     378             :  */
     379           0 : int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
     380             :                      struct amdgpu_sync *sync, struct dma_fence *fence,
     381             :                      struct amdgpu_job *job)
     382             : {
     383           0 :         struct amdgpu_device *adev = ring->adev;
     384           0 :         unsigned vmhub = ring->funcs->vmhub;
     385           0 :         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     386           0 :         struct amdgpu_vmid *idle = NULL;
     387           0 :         struct amdgpu_vmid *id = NULL;
     388           0 :         int r = 0;
     389             : 
     390           0 :         mutex_lock(&id_mgr->lock);
     391           0 :         r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
     392           0 :         if (r || !idle)
     393             :                 goto error;
     394             : 
     395           0 :         if (vm->reserved_vmid[vmhub]) {
     396           0 :                 r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
     397           0 :                 if (r || !id)
     398             :                         goto error;
     399             :         } else {
     400           0 :                 r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
     401           0 :                 if (r)
     402             :                         goto error;
     403             : 
     404           0 :                 if (!id) {
     405             :                         /* Still no ID to use? Then use the idle one found earlier */
     406           0 :                         id = idle;
     407             : 
     408             :                         /* Remember this submission as user of the VMID */
     409           0 :                         r = amdgpu_sync_fence(&id->active, fence);
     410           0 :                         if (r)
     411             :                                 goto error;
     412             : 
     413           0 :                         id->flushed_updates = amdgpu_vm_tlb_seq(vm);
     414           0 :                         job->vm_needs_flush = true;
     415             :                 }
     416             : 
     417           0 :                 list_move_tail(&id->list, &id_mgr->ids_lru);
     418             :         }
     419             : 
     420           0 :         id->pd_gpu_addr = job->vm_pd_addr;
     421           0 :         id->owner = vm->immediate.fence_context;
     422             : 
     423           0 :         if (job->vm_needs_flush) {
     424           0 :                 dma_fence_put(id->last_flush);
     425           0 :                 id->last_flush = NULL;
     426             :         }
     427           0 :         job->vmid = id - id_mgr->ids;
     428           0 :         job->pasid = vm->pasid;
     429           0 :         trace_amdgpu_vm_grab_id(vm, ring, job);
     430             : 
     431             : error:
     432           0 :         mutex_unlock(&id_mgr->lock);
     433           0 :         return r;
     434             : }
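
/* A minimal sketch of how a submission path might invoke the grab;
 * the job->sync member and the scheduler 'finished' fence are
 * assumptions about the surrounding job structure. On success
 * job->vmid and job->pasid are filled in, and job->vm_needs_flush
 * tells the ring whether to emit a VM flush before the IBs.
 */
static int example_prepare_job(struct amdgpu_vm *vm,
                               struct amdgpu_ring *ring,
                               struct amdgpu_job *job)
{
        return amdgpu_vmid_grab(vm, ring, &job->sync,
                                &job->base.s_fence->finished, job);
}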
     435             : 
     436           0 : int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
     437             :                                struct amdgpu_vm *vm,
     438             :                                unsigned vmhub)
     439             : {
     440             :         struct amdgpu_vmid_mgr *id_mgr;
     441             :         struct amdgpu_vmid *idle;
     442           0 :         int r = 0;
     443             : 
     444           0 :         id_mgr = &adev->vm_manager.id_mgr[vmhub];
     445           0 :         mutex_lock(&id_mgr->lock);
     446           0 :         if (vm->reserved_vmid[vmhub])
     447             :                 goto unlock;
     448           0 :         if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
     449             :             AMDGPU_VM_MAX_RESERVED_VMID) {
      450           0 :                 DRM_ERROR("Over limit of reserved VMIDs\n");
     451           0 :                 atomic_dec(&id_mgr->reserved_vmid_num);
     452           0 :                 r = -EINVAL;
     453           0 :                 goto unlock;
     454             :         }
      455             :         /* Reserve the first VMID from the LRU list */
     456           0 :         idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
     457           0 :         list_del_init(&idle->list);
     458           0 :         vm->reserved_vmid[vmhub] = idle;
     459           0 :         mutex_unlock(&id_mgr->lock);
     460             : 
     461           0 :         return 0;
     462             : unlock:
     463           0 :         mutex_unlock(&id_mgr->lock);
     464           0 :         return r;
     465             : }
     466             : 
     467           0 : void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
     468             :                                struct amdgpu_vm *vm,
     469             :                                unsigned vmhub)
     470             : {
     471           0 :         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     472             : 
     473           0 :         mutex_lock(&id_mgr->lock);
     474           0 :         if (vm->reserved_vmid[vmhub]) {
     475           0 :                 list_add(&vm->reserved_vmid[vmhub]->list,
     476             :                         &id_mgr->ids_lru);
     477           0 :                 vm->reserved_vmid[vmhub] = NULL;
     478           0 :                 atomic_dec(&id_mgr->reserved_vmid_num);
     479             :         }
     480           0 :         mutex_unlock(&id_mgr->lock);
     481           0 : }
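
/* A minimal sketch pairing the two helpers above: pin a dedicated
 * VMID on the GFX hub for a VM and release it again at teardown.
 * AMDGPU_GFXHUB_0 is the GFX hub index in this kernel generation; the
 * error handling is illustrative only.
 */
static int example_reserved_vmid(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
{
        int r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB_0);

        if (r)
                return r;

        /* ... all submissions from this VM now share one VMID ... */

        amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB_0);
        return 0;
}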
     482             : 
     483             : /**
     484             :  * amdgpu_vmid_reset - reset VMID to zero
     485             :  *
     486             :  * @adev: amdgpu device structure
     487             :  * @vmhub: vmhub type
     488             :  * @vmid: vmid number to use
     489             :  *
      490             :  * Reset saved GDS, GWS and OA to force a switch on the next flush.
     491             :  */
     492           0 : void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
     493             :                        unsigned vmid)
     494             : {
     495           0 :         struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
     496           0 :         struct amdgpu_vmid *id = &id_mgr->ids[vmid];
     497             : 
     498           0 :         mutex_lock(&id_mgr->lock);
     499           0 :         id->owner = 0;
     500           0 :         id->gds_base = 0;
     501           0 :         id->gds_size = 0;
     502           0 :         id->gws_base = 0;
     503           0 :         id->gws_size = 0;
     504           0 :         id->oa_base = 0;
     505           0 :         id->oa_size = 0;
     506           0 :         mutex_unlock(&id_mgr->lock);
     507           0 : }
     508             : 
     509             : /**
      510             :  * amdgpu_vmid_reset_all - reset all VMIDs to zero
     511             :  *
     512             :  * @adev: amdgpu device structure
     513             :  *
      514             :  * Reset all VMIDs to force a flush on next use.
     515             :  */
     516           0 : void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
     517             : {
     518             :         unsigned i, j;
     519             : 
     520           0 :         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
     521           0 :                 struct amdgpu_vmid_mgr *id_mgr =
     522             :                         &adev->vm_manager.id_mgr[i];
     523             : 
     524           0 :                 for (j = 1; j < id_mgr->num_ids; ++j)
     525           0 :                         amdgpu_vmid_reset(adev, i, j);
     526             :         }
     527           0 : }
     528             : 
     529             : /**
     530             :  * amdgpu_vmid_mgr_init - init the VMID manager
     531             :  *
     532             :  * @adev: amdgpu_device pointer
     533             :  *
      534             :  * Initialize the VMID manager structures.
     535             :  */
     536           0 : void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
     537             : {
     538             :         unsigned i, j;
     539             : 
     540           0 :         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
     541           0 :                 struct amdgpu_vmid_mgr *id_mgr =
     542             :                         &adev->vm_manager.id_mgr[i];
     543             : 
     544           0 :                 mutex_init(&id_mgr->lock);
     545           0 :                 INIT_LIST_HEAD(&id_mgr->ids_lru);
     546           0 :                 atomic_set(&id_mgr->reserved_vmid_num, 0);
     547             : 
     548             :                 /* manage only VMIDs not used by KFD */
     549           0 :                 id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
     550             : 
     551             :                 /* skip over VMID 0, since it is the system VM */
     552           0 :                 for (j = 1; j < id_mgr->num_ids; ++j) {
     553           0 :                         amdgpu_vmid_reset(adev, i, j);
     554           0 :                         amdgpu_sync_create(&id_mgr->ids[j].active);
     555           0 :                         list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
     556             :                 }
     557             :         }
     558           0 : }
     559             : 
     560             : /**
      561             :  * amdgpu_vmid_mgr_fini - cleanup the VMID manager
     562             :  *
     563             :  * @adev: amdgpu_device pointer
     564             :  *
      565             :  * Clean up the VMID manager and free its resources.
     566             :  */
     567           0 : void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
     568             : {
     569             :         unsigned i, j;
     570             : 
     571           0 :         for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
     572             :                 struct amdgpu_vmid_mgr *id_mgr =
     573             :                         &adev->vm_manager.id_mgr[i];
     574             : 
     575             :                 mutex_destroy(&id_mgr->lock);
     576           0 :                 for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
     577           0 :                         struct amdgpu_vmid *id = &id_mgr->ids[j];
     578             : 
     579           0 :                         amdgpu_sync_free(&id->active);
     580           0 :                         dma_fence_put(id->last_flush);
     581           0 :                         dma_fence_put(id->pasid_mapping);
     582             :                 }
     583             :         }
     584           0 : }
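
/* A minimal sketch of the expected init/fini pairing over the device
 * lifetime; the call sites are assumptions (in the real driver they
 * live in the VM manager setup and teardown paths).
 */
static void example_vmid_mgr_lifetime(struct amdgpu_device *adev)
{
        amdgpu_vmid_mgr_init(adev);
        /* ... amdgpu_vmid_grab() / amdgpu_vmid_reset() at runtime ... */
        amdgpu_vmid_mgr_fini(adev);
}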

Generated by: LCOV version 1.14