LCOV - code coverage report
Current view: top level - drivers/gpu/drm/amd/amdgpu - amdgpu_vram_mgr.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 237 0.0 %
Date: 2022-12-09 01:23:36 Functions: 0 17 0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 2016 Advanced Micro Devices, Inc.
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice shall be included in
      12             :  * all copies or substantial portions of the Software.
      13             :  *
      14             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      15             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      16             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      17             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      18             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      19             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      20             :  * OTHER DEALINGS IN THE SOFTWARE.
      21             :  *
      22             :  * Authors: Christian König
      23             :  */
      24             : 
      25             : #include <linux/dma-mapping.h>
      26             : #include <drm/ttm/ttm_range_manager.h>
      27             : 
      28             : #include "amdgpu.h"
      29             : #include "amdgpu_vm.h"
      30             : #include "amdgpu_res_cursor.h"
      31             : #include "amdgpu_atomfirmware.h"
      32             : #include "atom.h"
      33             : 
/*
 * struct amdgpu_vram_reservation - a VRAM range to keep out of the allocator
 *
 * @node: list entry; sits on &amdgpu_vram_mgr.reservations_pending until the
 *        range can be reserved, then is moved to &amdgpu_vram_mgr.reserved_pages
 * @mm_node: drm_mm node describing the range (start/size in pages)
 */
struct amdgpu_vram_reservation {
	struct list_head node;
	struct drm_mm_node mm_node;
};
      38             : 
      39             : static inline struct amdgpu_vram_mgr *
      40             : to_vram_mgr(struct ttm_resource_manager *man)
      41             : {
      42           0 :         return container_of(man, struct amdgpu_vram_mgr, manager);
      43             : }
      44             : 
      45             : static inline struct amdgpu_device *
      46             : to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
      47             : {
      48           0 :         return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
      49             : }
      50             : 
      51             : /**
      52             :  * DOC: mem_info_vram_total
      53             :  *
      54             :  * The amdgpu driver provides a sysfs API for reporting current total VRAM
      55             :  * available on the device
      56             :  * The file mem_info_vram_total is used for this and returns the total
      57             :  * amount of VRAM in bytes
      58             :  */
      59           0 : static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
      60             :                 struct device_attribute *attr, char *buf)
      61             : {
      62           0 :         struct drm_device *ddev = dev_get_drvdata(dev);
      63           0 :         struct amdgpu_device *adev = drm_to_adev(ddev);
      64             : 
      65           0 :         return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
      66             : }
      67             : 
      68             : /**
      69             :  * DOC: mem_info_vis_vram_total
      70             :  *
      71             :  * The amdgpu driver provides a sysfs API for reporting current total
      72             :  * visible VRAM available on the device
      73             :  * The file mem_info_vis_vram_total is used for this and returns the total
      74             :  * amount of visible VRAM in bytes
      75             :  */
      76           0 : static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
      77             :                 struct device_attribute *attr, char *buf)
      78             : {
      79           0 :         struct drm_device *ddev = dev_get_drvdata(dev);
      80           0 :         struct amdgpu_device *adev = drm_to_adev(ddev);
      81             : 
      82           0 :         return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
      83             : }
      84             : 
      85             : /**
      86             :  * DOC: mem_info_vram_used
      87             :  *
      88             :  * The amdgpu driver provides a sysfs API for reporting current total VRAM
      89             :  * available on the device
      90             :  * The file mem_info_vram_used is used for this and returns the total
      91             :  * amount of currently used VRAM in bytes
      92             :  */
      93           0 : static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
      94             :                                               struct device_attribute *attr,
      95             :                                               char *buf)
      96             : {
      97           0 :         struct drm_device *ddev = dev_get_drvdata(dev);
      98           0 :         struct amdgpu_device *adev = drm_to_adev(ddev);
      99           0 :         struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
     100             : 
     101           0 :         return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
     102             : }
     103             : 
     104             : /**
     105             :  * DOC: mem_info_vis_vram_used
     106             :  *
     107             :  * The amdgpu driver provides a sysfs API for reporting current total of
     108             :  * used visible VRAM
     109             :  * The file mem_info_vis_vram_used is used for this and returns the total
     110             :  * amount of currently used visible VRAM in bytes
     111             :  */
     112           0 : static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
     113             :                                                   struct device_attribute *attr,
     114             :                                                   char *buf)
     115             : {
     116           0 :         struct drm_device *ddev = dev_get_drvdata(dev);
     117           0 :         struct amdgpu_device *adev = drm_to_adev(ddev);
     118             : 
     119           0 :         return sysfs_emit(buf, "%llu\n",
     120             :                           amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
     121             : }
     122             : 
     123             : /**
     124             :  * DOC: mem_info_vram_vendor
     125             :  *
     126             :  * The amdgpu driver provides a sysfs API for reporting the vendor of the
     127             :  * installed VRAM
     128             :  * The file mem_info_vram_vendor is used for this and returns the name of the
     129             :  * vendor.
     130             :  */
     131           0 : static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
     132             :                                            struct device_attribute *attr,
     133             :                                            char *buf)
     134             : {
     135           0 :         struct drm_device *ddev = dev_get_drvdata(dev);
     136           0 :         struct amdgpu_device *adev = drm_to_adev(ddev);
     137             : 
     138           0 :         switch (adev->gmc.vram_vendor) {
     139             :         case SAMSUNG:
     140           0 :                 return sysfs_emit(buf, "samsung\n");
     141             :         case INFINEON:
     142           0 :                 return sysfs_emit(buf, "infineon\n");
     143             :         case ELPIDA:
     144           0 :                 return sysfs_emit(buf, "elpida\n");
     145             :         case ETRON:
     146           0 :                 return sysfs_emit(buf, "etron\n");
     147             :         case NANYA:
     148           0 :                 return sysfs_emit(buf, "nanya\n");
     149             :         case HYNIX:
     150           0 :                 return sysfs_emit(buf, "hynix\n");
     151             :         case MOSEL:
     152           0 :                 return sysfs_emit(buf, "mosel\n");
     153             :         case WINBOND:
     154           0 :                 return sysfs_emit(buf, "winbond\n");
     155             :         case ESMT:
     156           0 :                 return sysfs_emit(buf, "esmt\n");
     157             :         case MICRON:
     158           0 :                 return sysfs_emit(buf, "micron\n");
     159             :         default:
     160           0 :                 return sysfs_emit(buf, "unknown\n");
     161             :         }
     162             : }
     163             : 
     164             : static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
     165             :                    amdgpu_mem_info_vram_total_show, NULL);
     166             : static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
     167             :                    amdgpu_mem_info_vis_vram_total_show,NULL);
     168             : static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
     169             :                    amdgpu_mem_info_vram_used_show, NULL);
     170             : static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
     171             :                    amdgpu_mem_info_vis_vram_used_show, NULL);
     172             : static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
     173             :                    amdgpu_mem_info_vram_vendor, NULL);
     174             : 
/* All VRAM-manager sysfs attributes, registered as one group. */
static struct attribute *amdgpu_vram_mgr_attributes[] = {
	&dev_attr_mem_info_vram_total.attr,
	&dev_attr_mem_info_vis_vram_total.attr,
	&dev_attr_mem_info_vram_used.attr,
	&dev_attr_mem_info_vis_vram_used.attr,
	&dev_attr_mem_info_vram_vendor.attr,
	NULL	/* sentinel */
};

const struct attribute_group amdgpu_vram_mgr_attr_group = {
	.attrs = amdgpu_vram_mgr_attributes
};
     187             : 
     188             : /**
     189             :  * amdgpu_vram_mgr_vis_size - Calculate visible node size
     190             :  *
     191             :  * @adev: amdgpu_device pointer
     192             :  * @node: MM node structure
     193             :  *
     194             :  * Calculate how many bytes of the MM node are inside visible VRAM
     195             :  */
     196             : static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
     197             :                                     struct drm_mm_node *node)
     198             : {
     199           0 :         uint64_t start = node->start << PAGE_SHIFT;
     200           0 :         uint64_t end = (node->size + node->start) << PAGE_SHIFT;
     201             : 
     202           0 :         if (start >= adev->gmc.visible_vram_size)
     203             :                 return 0;
     204             : 
     205             :         return (end > adev->gmc.visible_vram_size ?
     206           0 :                 adev->gmc.visible_vram_size : end) - start;
     207             : }
     208             : 
     209             : /**
     210             :  * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
     211             :  *
     212             :  * @bo: &amdgpu_bo buffer object (must be in VRAM)
     213             :  *
     214             :  * Returns:
     215             :  * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
     216             :  */
     217           0 : u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
     218             : {
     219           0 :         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     220           0 :         struct ttm_resource *res = bo->tbo.resource;
     221           0 :         unsigned pages = res->num_pages;
     222             :         struct drm_mm_node *mm;
     223             :         u64 usage;
     224             : 
     225           0 :         if (amdgpu_gmc_vram_full_visible(&adev->gmc))
     226           0 :                 return amdgpu_bo_size(bo);
     227             : 
     228           0 :         if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
     229             :                 return 0;
     230             : 
     231           0 :         mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
     232           0 :         for (usage = 0; pages; pages -= mm->size, mm++)
     233           0 :                 usage += amdgpu_vram_mgr_vis_size(adev, mm);
     234             : 
     235             :         return usage;
     236             : }
     237             : 
/*
 * Commit the reservation of VRAM pages.
 *
 * Walks the pending-reservation list and tries to insert each range into
 * the drm_mm; ranges that are still allocated stay pending and are retried
 * on the next call. Callers hold mgr->lock (see
 * amdgpu_vram_mgr_reserve_range() and amdgpu_vram_mgr_del()).
 */
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
		/* Range still in use -- leave it pending and retry later. */
		if (drm_mm_reserve_node(mm, &rsv->mm_node))
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->mm_node.start, rsv->mm_node.size);

		/* Account the reserved pages as used (visible and total). */
		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		atomic64_add(vis_usage, &mgr->vis_usage);
		spin_lock(&man->bdev->lru_lock);
		man->usage += rsv->mm_node.size << PAGE_SHIFT;
		spin_unlock(&man->bdev->lru_lock);
		list_move(&rsv->node, &mgr->reserved_pages);
	}
}
     262             : 
     263             : /**
     264             :  * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
     265             :  *
     266             :  * @mgr: amdgpu_vram_mgr pointer
     267             :  * @start: start address of the range in VRAM
     268             :  * @size: size of the range
     269             :  *
     270             :  * Reserve memory from start address with the specified size in VRAM
     271             :  */
     272           0 : int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
     273             :                                   uint64_t start, uint64_t size)
     274             : {
     275             :         struct amdgpu_vram_reservation *rsv;
     276             : 
     277           0 :         rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
     278           0 :         if (!rsv)
     279             :                 return -ENOMEM;
     280             : 
     281           0 :         INIT_LIST_HEAD(&rsv->node);
     282           0 :         rsv->mm_node.start = start >> PAGE_SHIFT;
     283           0 :         rsv->mm_node.size = size >> PAGE_SHIFT;
     284             : 
     285           0 :         spin_lock(&mgr->lock);
     286           0 :         list_add_tail(&rsv->node, &mgr->reservations_pending);
     287           0 :         amdgpu_vram_mgr_do_reserve(&mgr->manager);
     288           0 :         spin_unlock(&mgr->lock);
     289             : 
     290           0 :         return 0;
     291             : }
     292             : 
     293             : /**
     294             :  * amdgpu_vram_mgr_query_page_status - query the reservation status
     295             :  *
     296             :  * @mgr: amdgpu_vram_mgr pointer
     297             :  * @start: start address of a page in VRAM
     298             :  *
     299             :  * Returns:
     300             :  *      -EBUSY: the page is still hold and in pending list
     301             :  *      0: the page has been reserved
     302             :  *      -ENOENT: the input page is not a reservation
     303             :  */
     304           0 : int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
     305             :                                       uint64_t start)
     306             : {
     307             :         struct amdgpu_vram_reservation *rsv;
     308             :         int ret;
     309             : 
     310           0 :         spin_lock(&mgr->lock);
     311             : 
     312           0 :         list_for_each_entry(rsv, &mgr->reservations_pending, node) {
     313           0 :                 if ((rsv->mm_node.start <= start) &&
     314           0 :                     (start < (rsv->mm_node.start + rsv->mm_node.size))) {
     315             :                         ret = -EBUSY;
     316             :                         goto out;
     317             :                 }
     318             :         }
     319             : 
     320           0 :         list_for_each_entry(rsv, &mgr->reserved_pages, node) {
     321           0 :                 if ((rsv->mm_node.start <= start) &&
     322           0 :                     (start < (rsv->mm_node.start + rsv->mm_node.size))) {
     323             :                         ret = 0;
     324             :                         goto out;
     325             :                 }
     326             :         }
     327             : 
     328             :         ret = -ENOENT;
     329             : out:
     330           0 :         spin_unlock(&mgr->lock);
     331           0 :         return ret;
     332             : }
     333             : 
     334             : /**
     335             :  * amdgpu_vram_mgr_virt_start - update virtual start address
     336             :  *
     337             :  * @mem: ttm_resource to update
     338             :  * @node: just allocated node
     339             :  *
     340             :  * Calculate a virtual BO start address to easily check if everything is CPU
     341             :  * accessible.
     342             :  */
     343             : static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
     344             :                                        struct drm_mm_node *node)
     345             : {
     346             :         unsigned long start;
     347             : 
     348           0 :         start = node->start + node->size;
     349           0 :         if (start > mem->num_pages)
     350           0 :                 start -= mem->num_pages;
     351             :         else
     352             :                 start = 0;
     353           0 :         mem->start = max(mem->start, start);
     354             : }
     355             : 
/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Allocate VRAM for the given BO. The allocation is split into up to
 * @num_nodes drm_mm nodes of at most @pages_per_node pages each, unless
 * TTM_PL_FLAG_CONTIGUOUS forces a single node.
 *
 * Returns:
 * 0 on success and *@res filled in, -ENOMEM/-ENOSPC or a drm_mm insertion
 * error otherwise.
 */
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *tbo,
                               const struct ttm_place *place,
                               struct ttm_resource **res)
{
        unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        uint64_t vis_usage = 0, mem_bytes, max_bytes;
        struct ttm_range_mgr_node *node;
        struct drm_mm *mm = &mgr->mm;
        enum drm_mm_insert_mode mode;
        unsigned i;
        int r;

        /* No explicit upper limit -> cap at the end of managed VRAM. */
        lpfn = place->lpfn;
        if (!lpfn)
                lpfn = man->size >> PAGE_SHIFT;

        /* Hold back some VRAM from user BOs for VM housekeeping. */
        max_bytes = adev->gmc.mc_vram_size;
        if (tbo->type != ttm_bo_type_kernel)
                max_bytes -= AMDGPU_VM_RESERVED_VRAM;

        mem_bytes = tbo->base.size;
        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                /* Contiguous: all pages must land in a single node. */
                pages_per_node = ~0ul;
                num_nodes = 1;
        } else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                pages_per_node = HPAGE_PMD_NR;
#else
                /* default to 2MB */
                pages_per_node = 2UL << (20UL - PAGE_SHIFT);
#endif
                /* Node size must also satisfy the BO's page alignment. */
                pages_per_node = max_t(uint32_t, pages_per_node,
                                       tbo->page_alignment);
                num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
        }

        node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
                        GFP_KERNEL | __GFP_ZERO);
        if (!node)
                return -ENOMEM;

        ttm_resource_init(tbo, place, &node->base);

        /* bail out quickly if there's likely not enough VRAM for this BO */
        if (ttm_resource_manager_usage(man) > max_bytes) {
                r = -ENOSPC;
                goto error_fini;
        }

        mode = DRM_MM_INSERT_BEST;
        if (place->flags & TTM_PL_FLAG_TOPDOWN)
                mode = DRM_MM_INSERT_HIGH;

        pages_left = node->base.num_pages;

        /* Limit maximum size to 2GB due to SG table limitations */
        pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));

        i = 0;
        spin_lock(&mgr->lock);
        while (pages_left) {
                uint32_t alignment = tbo->page_alignment;

                if (pages >= pages_per_node)
                        alignment = pages_per_node;

                r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
                                                alignment, 0, place->fpfn,
                                                lpfn, mode);
                if (unlikely(r)) {
                        /*
                         * Insertion failed: retry with a smaller chunk
                         * (rounded down to a power of two) before giving up.
                         */
                        if (pages > pages_per_node) {
                                if (is_power_of_2(pages))
                                        pages = pages / 2;
                                else
                                        pages = rounddown_pow_of_two(pages);
                                continue;
                        }
                        goto error_free;
                }

                /* Track CPU-visible bytes and the virtual start address. */
                vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
                amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
                pages_left -= pages;
                ++i;

                if (pages > pages_left)
                        pages = pages_left;
        }
        spin_unlock(&mgr->lock);

        /* A single node means the allocation ended up contiguous. */
        if (i == 1)
                node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

        if (adev->gmc.xgmi.connected_to_cpu)
                node->base.bus.caching = ttm_cached;
        else
                node->base.bus.caching = ttm_write_combined;

        atomic64_add(vis_usage, &mgr->vis_usage);
        *res = &node->base;
        return 0;

error_free:
        /* Undo the nodes inserted so far, still under mgr->lock. */
        while (i--)
                drm_mm_remove_node(&node->mm_nodes[i]);
        spin_unlock(&mgr->lock);
error_fini:
        ttm_resource_fini(man, &node->base);
        kvfree(node);

        return r;
}
     481             : 
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @res: TTM memory object
 *
 * Free the allocated VRAM again: return every drm_mm node of the resource
 * to the allocator, retry pending reservations, and update the visible
 * usage accounting.
 */
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
                                struct ttm_resource *res)
{
        struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        uint64_t vis_usage = 0;
        unsigned i, pages;

        spin_lock(&mgr->lock);
        /* Remove each backing node and sum its CPU-visible bytes. */
        for (i = 0, pages = res->num_pages; pages;
             pages -= node->mm_nodes[i].size, ++i) {
                struct drm_mm_node *mm = &node->mm_nodes[i];

                drm_mm_remove_node(mm);
                vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
        }
        /* Freed space may allow pending reservations to be committed. */
        amdgpu_vram_mgr_do_reserve(man);
        spin_unlock(&mgr->lock);

        atomic64_sub(vis_usage, &mgr->vis_usage);

        ttm_resource_fini(man, res);
        kvfree(node);
}
     515             : 
     516             : /**
     517             :  * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
     518             :  *
     519             :  * @adev: amdgpu device pointer
     520             :  * @res: TTM memory object
     521             :  * @offset: byte offset from the base of VRAM BO
     522             :  * @length: number of bytes to export in sg_table
     523             :  * @dev: the other device
     524             :  * @dir: dma direction
     525             :  * @sgt: resulting sg table
     526             :  *
     527             :  * Allocate and fill a sg table from a VRAM allocation.
     528             :  */
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct amdgpu_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	/* Caller owns *sgt on success; freed here on every error path. */
	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_MM nodes to export */
	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		amdgpu_res_next(&cursor, cursor.size);
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/*
	 * Initialize scatterlist nodes of sg_table
	 *
	 * Zero lengths let the error_unmap path below tell entries that
	 * were successfully dma-mapped (length set by sg_set_page) apart
	 * from ones that were never reached.
	 */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	/*
	 * Walk down DRM_MM nodes to populate scatterlist nodes
	 * @note: Use iterator api to get first the DRM_MM node
	 * and the number of bytes from it. Access the following
	 * DRM_MM node(s) if more buffer needs to exported
	 */
	amdgpu_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		/* Translate the VRAM offset to a bus address in the BAR. */
		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
		size_t size = cursor.size;
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		/* MMIO resource has no struct page backing it. */
		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return 0;

error_unmap:
	/* Undo only the entries that were actually mapped (length != 0). */
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}
     602             : 
     603             : /**
     604             :  * amdgpu_vram_mgr_free_sgt - allocate and fill a sg table
     605             :  *
     606             :  * @dev: device pointer
     607             :  * @dir: data direction of resource to unmap
     608             :  * @sgt: sg table to free
     609             :  *
     610             :  * Free a previously allocate sg table.
     611             :  */
     612           0 : void amdgpu_vram_mgr_free_sgt(struct device *dev,
     613             :                               enum dma_data_direction dir,
     614             :                               struct sg_table *sgt)
     615             : {
     616             :         struct scatterlist *sg;
     617             :         int i;
     618             : 
     619           0 :         for_each_sgtable_sg(sgt, sg, i)
     620           0 :                 dma_unmap_resource(dev, sg->dma_address,
     621           0 :                                    sg->length, dir,
     622             :                                    DMA_ATTR_SKIP_CPU_SYNC);
     623           0 :         sg_free_table(sgt);
     624           0 :         kfree(sgt);
     625           0 : }
     626             : 
     627             : /**
     628             :  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
     629             :  *
     630             :  * @mgr: amdgpu_vram_mgr pointer
     631             :  *
     632             :  * Returns how many bytes are used in the visible part of VRAM
     633             :  */
     634           0 : uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
     635             : {
     636           0 :         return atomic64_read(&mgr->vis_usage);
     637             : }
     638             : 
     639             : /**
     640             :  * amdgpu_vram_mgr_debug - dump VRAM table
     641             :  *
     642             :  * @man: TTM memory type manager
     643             :  * @printer: DRM printer to use
     644             :  *
     645             :  * Dump the table content using printk.
     646             :  */
     647           0 : static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
     648             :                                   struct drm_printer *printer)
     649             : {
     650           0 :         struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     651             : 
     652           0 :         drm_printf(printer, "  vis usage:%llu\n",
     653             :                    amdgpu_vram_mgr_vis_usage(mgr));
     654             : 
     655           0 :         spin_lock(&mgr->lock);
     656           0 :         drm_mm_print(&mgr->mm, printer);
     657           0 :         spin_unlock(&mgr->lock);
     658           0 : }
     659             : 
/* TTM resource manager callbacks for the VRAM domain. */
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
        .alloc  = amdgpu_vram_mgr_new,
        .free   = amdgpu_vram_mgr_del,
        .debug  = amdgpu_vram_mgr_debug
};
     665             : 
     666             : /**
     667             :  * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
     668             :  *
     669             :  * @adev: amdgpu_device pointer
     670             :  *
     671             :  * Allocate and initialize the VRAM manager.
     672             :  */
     673           0 : int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
     674             : {
     675           0 :         struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
     676           0 :         struct ttm_resource_manager *man = &mgr->manager;
     677             : 
     678           0 :         ttm_resource_manager_init(man, &adev->mman.bdev,
     679             :                                   adev->gmc.real_vram_size);
     680             : 
     681           0 :         man->func = &amdgpu_vram_mgr_func;
     682             : 
     683           0 :         drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
     684           0 :         spin_lock_init(&mgr->lock);
     685           0 :         INIT_LIST_HEAD(&mgr->reservations_pending);
     686           0 :         INIT_LIST_HEAD(&mgr->reserved_pages);
     687             : 
     688           0 :         ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
     689           0 :         ttm_resource_manager_set_used(man, true);
     690           0 :         return 0;
     691             : }
     692             : 
     693             : /**
     694             :  * amdgpu_vram_mgr_fini - free and destroy VRAM manager
     695             :  *
     696             :  * @adev: amdgpu_device pointer
     697             :  *
     698             :  * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
     699             :  * allocated inside it.
     700             :  */
     701           0 : void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
     702             : {
     703           0 :         struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
     704           0 :         struct ttm_resource_manager *man = &mgr->manager;
     705             :         int ret;
     706             :         struct amdgpu_vram_reservation *rsv, *temp;
     707             : 
     708           0 :         ttm_resource_manager_set_used(man, false);
     709             : 
     710           0 :         ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
     711           0 :         if (ret)
     712             :                 return;
     713             : 
     714           0 :         spin_lock(&mgr->lock);
     715           0 :         list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
     716           0 :                 kfree(rsv);
     717             : 
     718           0 :         list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
     719           0 :                 drm_mm_remove_node(&rsv->mm_node);
     720           0 :                 kfree(rsv);
     721             :         }
     722           0 :         drm_mm_takedown(&mgr->mm);
     723           0 :         spin_unlock(&mgr->lock);
     724             : 
     725           0 :         ttm_resource_manager_cleanup(man);
     726           0 :         ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
     727             : }

Generated by: LCOV version 1.14