LCOV code coverage report (generated by LCOV version 1.14)
Current view: top level - drivers/gpu/drm/ttm - ttm_device.c (source / functions)
Test: coverage.info        Date: 2022-12-09 01:23:36
Coverage: Lines 0 / 124 hit (0.0 %), Functions 0 / 9 hit (0.0 %)

Source code:
/* SPDX-License-Identifier: GPL-2.0 OR MIT */

/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#define pr_fmt(fmt) "[TTM DEVICE] " fmt

#include <linux/mm.h>

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_api.h>

#include "ttm_module.h"

/*
 * ttm_global_mutex - protecting the global state
 */
static DEFINE_MUTEX(ttm_global_mutex);
static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

struct dentry *ttm_debugfs_root;

static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;

	ttm_pool_mgr_fini();
	debugfs_remove(ttm_debugfs_root);

	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	unsigned long num_pages, num_dma32;
	struct sysinfo si;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	si_meminfo(&si);

	ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
	if (IS_ERR(ttm_debugfs_root)) {
		ttm_debugfs_root = NULL;
	}

	/* Limit the number of pages in the pool to about 50% of the total
	 * system memory.
	 */
	num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
	num_pages /= 2;

	/* But for DMA32 we limit ourselves to a maximum of 2GiB. */
	num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
		>> PAGE_SHIFT;
	num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));

	ttm_pool_mgr_init(num_pages);
	ttm_tt_mgr_init(num_pages, num_dma32);

	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	if (ret && ttm_debugfs_root)
		debugfs_remove(ttm_debugfs_root);
	if (ret)
		--ttm_glob_use_count;
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
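
/*
 * Illustrative sketch only (not part of ttm_device.c): the sizing math in
 * ttm_global_init() boils down to "half of system RAM for the pool, and at
 * most 2 GiB for the DMA32 pool". The hypothetical helper below reproduces
 * that computation for assumed inputs; with 16 GiB of RAM, no highmem and
 * 4 KiB pages it yields num_pages = 2097152 (8 GiB) and num_dma32 = 524288
 * (2 GiB).
 */
static void example_ttm_pool_limits(u64 ram_bytes, u64 lowmem_bytes,
				    unsigned long *num_pages,
				    unsigned long *num_dma32)
{
	/* About 50% of total system memory, expressed in pages. */
	*num_pages = (ram_bytes >> PAGE_SHIFT) / 2;

	/* DMA32-capable (low) memory, capped at 2 GiB worth of pages. */
	*num_dma32 = min_t(unsigned long, lowmem_bytes >> PAGE_SHIFT,
			   2UL << (30 - PAGE_SHIFT));
}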

/*
 * A buffer object shrink method that tries to swap out the first swappable
 * buffer object on the LRU lists of all registered devices.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(ttm_global_swapout);
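
/*
 * Hedged usage sketch (assumed caller, not part of this file): a memory
 * shrinker's scan callback could use ttm_global_swapout() roughly as below,
 * swapping out one buffer object per call until enough pages were freed.
 * example_shrinker_scan() and the GFP_NOFS choice are assumptions for
 * illustration; struct shrinker and SHRINK_STOP come from
 * <linux/shrinker.h>.
 */
static unsigned long example_shrinker_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned long freed = 0;
	int num;

	do {
		/* A positive return value is the number of pages swapped out. */
		num = ttm_global_swapout(&ctx, GFP_NOFS);
		if (num <= 0)
			break;
		freed += num;
	} while (freed < sc->nr_to_scan);

	return freed ?: SHRINK_STOP;
}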

int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource_manager *man;
	struct ttm_resource *res;
	unsigned i;
	int ret;

	spin_lock(&bdev->lru_lock);
	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		ttm_resource_manager_for_each_res(man, &cursor, res) {
			struct ttm_buffer_object *bo = res->bo;
			uint32_t num_pages;

			if (!bo)
				continue;

			num_pages = PFN_UP(bo->base.size);
			ret = ttm_bo_swapout(bo, ctx, gfp_flags);
			/* ttm_bo_swapout has dropped the lru_lock */
			if (!ret)
				return num_pages;
			if (ret != -EBUSY)
				return ret;
		}
	}
	spin_unlock(&bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);

static void ttm_device_delayed_workqueue(struct work_struct *work)
{
	struct ttm_device *bdev =
		container_of(work, struct ttm_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device.
 *
 * Returns:
 * 0 on success, a negative error code on failure.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->funcs = funcs;

	ttm_sys_man_init(bdev);
	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
	spin_lock_init(&bdev->lru_lock);
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->pinned);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);
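
/*
 * Hedged usage sketch (driver side, not part of this file): a DRM driver
 * usually embeds struct ttm_device in its own device structure and calls
 * ttm_device_init() once at load time. Everything named example_* below is
 * assumed for illustration, including the empty ttm_device_funcs table.
 */
static struct ttm_device_funcs example_bo_driver;	/* driver callbacks (assumed) */

struct example_device {
	struct drm_device drm;
	struct ttm_device bdev;
};

static int example_ttm_init(struct example_device *edev)
{
	/* Reuse the DRM device's mapping and VMA offset manager. */
	return ttm_device_init(&edev->bdev, &example_bo_driver,
			       edev->drm.dev,
			       edev->drm.anon_inode->i_mapping,
			       edev->drm.vma_offset_manager,
			       true /* use_dma_alloc */,
			       false /* use_dma32 */);
}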

void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned i;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&bdev->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);

static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
					      struct list_head *list)
{
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
		struct ttm_buffer_object *bo = res->bo;

		/* Take ref against racing releases once lru_lock is unlocked */
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		list_del_init(&res->lru);
		spin_unlock(&bdev->lru_lock);

		if (bo->ttm)
			ttm_tt_unpopulate(bo->bdev, bo->ttm);

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	spin_unlock(&bdev->lru_lock);
}

void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man;
	unsigned int i, j;

	ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);

	for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
		man = ttm_manager_type(bdev, i);
		if (!man || !man->use_tt)
			continue;

		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
			ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
	}
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
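
/*
 * Hedged teardown sketch (assumed, not part of this file): a driver that can
 * lose its device at runtime would typically drop all cached DMA mappings
 * while the underlying struct device is still alive, and only then release
 * the TTM device. example_ttm_teardown() and struct example_device are
 * assumptions carried over from the init sketch above.
 */
static void example_ttm_teardown(struct example_device *edev)
{
	/* Unpopulate every TT still sitting on a pinned or LRU list. */
	ttm_device_clear_dma_mappings(&edev->bdev);

	/* Then tear down the device and drop the global state reference. */
	ttm_device_fini(&edev->bdev);
}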
