LCOV - code coverage report
Current view: top level - drivers/gpu/drm/ttm - ttm_bo_util.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
Coverage:           Hit   Total   Coverage
  Lines:              0     261      0.0 %
  Functions:          0      19      0.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 OR MIT */
       2             : /**************************************************************************
       3             :  *
       4             :  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
       5             :  * All Rights Reserved.
       6             :  *
       7             :  * Permission is hereby granted, free of charge, to any person obtaining a
       8             :  * copy of this software and associated documentation files (the
       9             :  * "Software"), to deal in the Software without restriction, including
      10             :  * without limitation the rights to use, copy, modify, merge, publish,
      11             :  * distribute, sub license, and/or sell copies of the Software, and to
      12             :  * permit persons to whom the Software is furnished to do so, subject to
      13             :  * the following conditions:
      14             :  *
      15             :  * The above copyright notice and this permission notice (including the
      16             :  * next paragraph) shall be included in all copies or substantial portions
      17             :  * of the Software.
      18             :  *
      19             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      20             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      21             :  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
      22             :  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
      23             :  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
      24             :  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
      25             :  * USE OR OTHER DEALINGS IN THE SOFTWARE.
      26             :  *
      27             :  **************************************************************************/
      28             : /*
      29             :  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
      30             :  */
      31             : 
      32             : #include <drm/ttm/ttm_bo_driver.h>
      33             : #include <drm/ttm/ttm_placement.h>
      34             : #include <drm/drm_cache.h>
      35             : #include <drm/drm_vma_manager.h>
      36             : #include <linux/iosys-map.h>
      37             : #include <linux/io.h>
      38             : #include <linux/highmem.h>
      39             : #include <linux/wait.h>
      40             : #include <linux/slab.h>
      41             : #include <linux/vmalloc.h>
      42             : #include <linux/module.h>
      43             : #include <linux/dma-resv.h>
      44             : 
      45             : struct ttm_transfer_obj {
      46             :         struct ttm_buffer_object base;
      47             :         struct ttm_buffer_object *bo;
      48             : };
      49             : 
      50           0 : int ttm_mem_io_reserve(struct ttm_device *bdev,
      51             :                        struct ttm_resource *mem)
      52             : {
      53           0 :         if (mem->bus.offset || mem->bus.addr)
      54             :                 return 0;
      55             : 
      56           0 :         mem->bus.is_iomem = false;
      57           0 :         if (!bdev->funcs->io_mem_reserve)
      58             :                 return 0;
      59             : 
      60           0 :         return bdev->funcs->io_mem_reserve(bdev, mem);
      61             : }
      62             : 
      63           0 : void ttm_mem_io_free(struct ttm_device *bdev,
      64             :                      struct ttm_resource *mem)
      65             : {
      66           0 :         if (!mem)
      67             :                 return;
      68             : 
      69           0 :         if (!mem->bus.offset && !mem->bus.addr)
      70             :                 return;
      71             : 
      72           0 :         if (bdev->funcs->io_mem_free)
      73           0 :                 bdev->funcs->io_mem_free(bdev, mem);
      74             : 
      75           0 :         mem->bus.offset = 0;
      76           0 :         mem->bus.addr = NULL;
      77             : }
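
ttm_mem_io_reserve() fills in mem->bus via the driver's io_mem_reserve() callback (it is a no-op if the resource is already set up or the device has no callback), and ttm_mem_io_free() undoes that. Below is a minimal sketch of a caller bracketing a temporary ioremap with this pair; the example_ helper name and the error handling around it are assumptions, not taken from this file.

    /* Hypothetical helper: map an I/O-backed resource, touch it, unmap it. */
    static int example_peek_first_page(struct ttm_device *bdev,
                                       struct ttm_resource *mem)
    {
            void __iomem *vaddr;
            int ret;

            ret = ttm_mem_io_reserve(bdev, mem);    /* fills mem->bus.* */
            if (ret)
                    return ret;

            if (!mem->bus.is_iomem) {
                    ttm_mem_io_free(bdev, mem);
                    return -EINVAL;                 /* nothing to ioremap */
            }

            vaddr = ioremap(mem->bus.offset, PAGE_SIZE);
            if (!vaddr) {
                    ttm_mem_io_free(bdev, mem);
                    return -ENOMEM;
            }

            /* ... read or write the first page of the aperture here ... */

            iounmap(vaddr);
            ttm_mem_io_free(bdev, mem);             /* clears mem->bus.offset/addr */
            return 0;
    }
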
      78             : 
      79             : /**
      80             :  * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
      81             :  * @clear: Whether to clear rather than copy.
      82             :  * @num_pages: Number of pages of the operation.
      83             :  * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
      84             :  * @src_iter: A struct ttm_kmap_iter representing the source resource.
      85             :  *
       86             :  * This function is intended to be usable for moves that execute
       87             :  * asynchronously under a dma-fence, if desired.
      88             :  */
      89           0 : void ttm_move_memcpy(bool clear,
      90             :                      u32 num_pages,
      91             :                      struct ttm_kmap_iter *dst_iter,
      92             :                      struct ttm_kmap_iter *src_iter)
      93             : {
      94           0 :         const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
      95           0 :         const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
      96             :         struct iosys_map src_map, dst_map;
      97             :         pgoff_t i;
      98             : 
      99             :         /* Single TTM move. NOP */
     100           0 :         if (dst_ops->maps_tt && src_ops->maps_tt)
     101           0 :                 return;
     102             : 
     103             :         /* Don't move nonexistent data. Clear destination instead. */
     104           0 :         if (clear) {
     105           0 :                 for (i = 0; i < num_pages; ++i) {
     106           0 :                         dst_ops->map_local(dst_iter, &dst_map, i);
     107           0 :                         if (dst_map.is_iomem)
     108           0 :                                 memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
     109             :                         else
     110           0 :                                 memset(dst_map.vaddr, 0, PAGE_SIZE);
     111           0 :                         if (dst_ops->unmap_local)
     112           0 :                                 dst_ops->unmap_local(dst_iter, &dst_map);
     113             :                 }
     114             :                 return;
     115             :         }
     116             : 
     117           0 :         for (i = 0; i < num_pages; ++i) {
     118           0 :                 dst_ops->map_local(dst_iter, &dst_map, i);
     119           0 :                 src_ops->map_local(src_iter, &src_map, i);
     120             : 
     121           0 :                 drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
     122             : 
     123           0 :                 if (src_ops->unmap_local)
     124           0 :                         src_ops->unmap_local(src_iter, &src_map);
     125           0 :                 if (dst_ops->unmap_local)
     126           0 :                         dst_ops->unmap_local(dst_iter, &dst_map);
     127             :         }
     128             : }
     129             : EXPORT_SYMBOL(ttm_move_memcpy);
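
The @clear path described above only writes through the destination iterator; the source iterator is still required but is never mapped when @clear is true. A minimal sketch of zeroing a freshly allocated I/O-backed resource this way is shown below; only ttm_move_memcpy(), ttm_kmap_iter_linear_io_init()/_fini() and ttm_kmap_iter_tt_init() come from TTM, the example_ name is an assumption.

    /* Hypothetical: clear a new VRAM resource instead of copying stale pages. */
    static int example_clear_vram(struct ttm_buffer_object *bo,
                                  struct ttm_resource *vram_res)
    {
            struct ttm_kmap_iter_linear_io dst_io;
            struct ttm_kmap_iter_tt src_tt;
            struct ttm_kmap_iter *dst_iter, *src_iter;

            dst_iter = ttm_kmap_iter_linear_io_init(&dst_io, bo->bdev, vram_res);
            if (IS_ERR(dst_iter))
                    return PTR_ERR(dst_iter);

            /* Assumes bo->ttm exists; the iterator is never mapped when clearing. */
            src_iter = ttm_kmap_iter_tt_init(&src_tt, bo->ttm);

            ttm_move_memcpy(true, vram_res->num_pages, dst_iter, src_iter);

            ttm_kmap_iter_linear_io_fini(&dst_io, bo->bdev, vram_res);
            return 0;
    }
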
     130             : 
     131           0 : int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
     132             :                        struct ttm_operation_ctx *ctx,
     133             :                        struct ttm_resource *dst_mem)
     134             : {
     135           0 :         struct ttm_device *bdev = bo->bdev;
     136           0 :         struct ttm_resource_manager *dst_man =
     137           0 :                 ttm_manager_type(bo->bdev, dst_mem->mem_type);
     138           0 :         struct ttm_tt *ttm = bo->ttm;
     139           0 :         struct ttm_resource *src_mem = bo->resource;
     140           0 :         struct ttm_resource_manager *src_man =
     141           0 :                 ttm_manager_type(bdev, src_mem->mem_type);
     142             :         union {
     143             :                 struct ttm_kmap_iter_tt tt;
     144             :                 struct ttm_kmap_iter_linear_io io;
     145             :         } _dst_iter, _src_iter;
     146             :         struct ttm_kmap_iter *dst_iter, *src_iter;
     147             :         bool clear;
     148           0 :         int ret = 0;
     149             : 
     150           0 :         if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
     151           0 :                     dst_man->use_tt)) {
     152           0 :                 ret = ttm_tt_populate(bdev, ttm, ctx);
     153           0 :                 if (ret)
     154             :                         return ret;
     155             :         }
     156             : 
     157           0 :         dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
     158           0 :         if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
     159           0 :                 dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
     160           0 :         if (IS_ERR(dst_iter))
     161           0 :                 return PTR_ERR(dst_iter);
     162             : 
     163           0 :         src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
     164           0 :         if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
     165           0 :                 src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
     166           0 :         if (IS_ERR(src_iter)) {
     167           0 :                 ret = PTR_ERR(src_iter);
     168           0 :                 goto out_src_iter;
     169             :         }
     170             : 
     171           0 :         clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
     172           0 :         if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
     173           0 :                 ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
     174             : 
     175           0 :         if (!src_iter->ops->maps_tt)
     176           0 :                 ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
     177           0 :         ttm_bo_move_sync_cleanup(bo, dst_mem);
     178             : 
     179             : out_src_iter:
     180           0 :         if (!dst_iter->ops->maps_tt)
     181           0 :                 ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
     182             : 
     183             :         return ret;
     184             : }
     185             : EXPORT_SYMBOL(ttm_bo_move_memcpy);
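
ttm_bo_move_memcpy() is the CPU fallback for a driver's ttm_device_funcs.move() callback when no copy engine is available or usable. A sketch of that pattern follows; example_have_blitter(), example_schedule_blit() and the surrounding driver are assumptions, only ttm_bo_move_memcpy() is from this file.

    /* Hypothetical move() callback: fall back to the CPU memcpy path. */
    static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
                               struct ttm_operation_ctx *ctx,
                               struct ttm_resource *new_mem,
                               struct ttm_place *hop)
    {
            if (!example_have_blitter(bo->bdev))
                    return ttm_bo_move_memcpy(bo, ctx, new_mem);

            /* ... otherwise schedule a hardware copy and return its status ... */
            return example_schedule_blit(bo, evict, ctx, new_mem);
    }
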
     186             : 
     187           0 : static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
     188             : {
     189             :         struct ttm_transfer_obj *fbo;
     190             : 
     191           0 :         fbo = container_of(bo, struct ttm_transfer_obj, base);
     192           0 :         dma_resv_fini(&fbo->base.base._resv);
     193           0 :         ttm_bo_put(fbo->bo);
     194           0 :         kfree(fbo);
     195           0 : }
     196             : 
     197             : /**
     198             :  * ttm_buffer_object_transfer
     199             :  *
     200             :  * @bo: A pointer to a struct ttm_buffer_object.
     201             :  * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
     202             :  * holding the data of @bo with the old placement.
     203             :  *
     204             :  * This is a utility function that may be called after an accelerated move
     205             :  * has been scheduled. A new buffer object is created as a placeholder for
     206             :  * the old data while it's being copied. When that buffer object is idle,
     207             :  * it can be destroyed, releasing the space of the old placement.
     208             :  * Returns:
      209             :  * 0: Success, !0: Failure.
     210             :  */
     211             : 
     212           0 : static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
     213             :                                       struct ttm_buffer_object **new_obj)
     214             : {
     215             :         struct ttm_transfer_obj *fbo;
     216             :         int ret;
     217             : 
     218           0 :         fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
     219           0 :         if (!fbo)
     220             :                 return -ENOMEM;
     221             : 
     222           0 :         fbo->base = *bo;
     223             : 
     224             :         /**
     225             :          * Fix up members that we shouldn't copy directly:
     226             :          * TODO: Explicit member copy would probably be better here.
     227             :          */
     228             : 
     229           0 :         atomic_inc(&ttm_glob.bo_count);
     230           0 :         INIT_LIST_HEAD(&fbo->base.ddestroy);
     231           0 :         drm_vma_node_reset(&fbo->base.base.vma_node);
     232             : 
     233           0 :         kref_init(&fbo->base.kref);
     234           0 :         fbo->base.destroy = &ttm_transfered_destroy;
     235           0 :         fbo->base.pin_count = 0;
     236           0 :         if (bo->type != ttm_bo_type_sg)
     237           0 :                 fbo->base.base.resv = &fbo->base.base._resv;
     238             : 
     239           0 :         if (fbo->base.resource) {
     240           0 :                 ttm_resource_set_bo(fbo->base.resource, &fbo->base);
     241           0 :                 bo->resource = NULL;
     242           0 :                 ttm_bo_set_bulk_move(&fbo->base, NULL);
     243             :         } else {
     244           0 :                 fbo->base.bulk_move = NULL;
     245             :         }
     246             : 
     247           0 :         dma_resv_init(&fbo->base.base._resv);
     248           0 :         fbo->base.base.dev = NULL;
     249           0 :         ret = dma_resv_trylock(&fbo->base.base._resv);
     250           0 :         WARN_ON(!ret);
     251             : 
     252           0 :         ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
     253           0 :         if (ret) {
     254           0 :                 kfree(fbo);
     255           0 :                 return ret;
     256             :         }
     257             : 
     258           0 :         ttm_bo_get(bo);
     259           0 :         fbo->bo = bo;
     260             : 
     261           0 :         ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
     262             : 
     263           0 :         *new_obj = &fbo->base;
     264           0 :         return 0;
     265             : }
     266             : 
     267           0 : pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
     268             :                      pgprot_t tmp)
     269             : {
     270             :         struct ttm_resource_manager *man;
     271             :         enum ttm_caching caching;
     272             : 
     273           0 :         man = ttm_manager_type(bo->bdev, res->mem_type);
     274           0 :         caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
     275             : 
     276           0 :         return ttm_prot_from_caching(caching, tmp);
     277             : }
     278             : EXPORT_SYMBOL(ttm_io_prot);
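
ttm_io_prot() translates a resource's caching mode into the page protection a CPU mapping should use. A minimal sketch of deriving the protection for mapping a bo into a vma, assuming the caller already has the vma; the example_ name is hypothetical.

    /* Hypothetical: derive CPU page protection for mapping a bo into a vma. */
    static pgprot_t example_vma_prot(struct ttm_buffer_object *bo,
                                     struct vm_area_struct *vma)
    {
            /* Start from the vma's protection, then apply the bo's caching mode. */
            return ttm_io_prot(bo, bo->resource, vma->vm_page_prot);
    }
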
     279             : 
     280           0 : static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
     281             :                           unsigned long offset,
     282             :                           unsigned long size,
     283             :                           struct ttm_bo_kmap_obj *map)
     284             : {
     285           0 :         struct ttm_resource *mem = bo->resource;
     286             : 
     287           0 :         if (bo->resource->bus.addr) {
     288           0 :                 map->bo_kmap_type = ttm_bo_map_premapped;
     289           0 :                 map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
     290             :         } else {
     291           0 :                 resource_size_t res = bo->resource->bus.offset + offset;
     292             : 
     293           0 :                 map->bo_kmap_type = ttm_bo_map_iomap;
     294           0 :                 if (mem->bus.caching == ttm_write_combined)
     295           0 :                         map->virtual = ioremap_wc(res, size);
     296             : #ifdef CONFIG_X86
     297             :                 else if (mem->bus.caching == ttm_cached)
     298             :                         map->virtual = ioremap_cache(res, size);
     299             : #endif
     300             :                 else
     301           0 :                         map->virtual = ioremap(res, size);
     302             :         }
     303           0 :         return (!map->virtual) ? -ENOMEM : 0;
     304             : }
     305             : 
     306           0 : static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
     307             :                            unsigned long start_page,
     308             :                            unsigned long num_pages,
     309             :                            struct ttm_bo_kmap_obj *map)
     310             : {
     311           0 :         struct ttm_resource *mem = bo->resource;
     312           0 :         struct ttm_operation_ctx ctx = {
     313             :                 .interruptible = false,
     314             :                 .no_wait_gpu = false
     315             :         };
     316           0 :         struct ttm_tt *ttm = bo->ttm;
     317             :         pgprot_t prot;
     318             :         int ret;
     319             : 
     320           0 :         BUG_ON(!ttm);
     321             : 
     322           0 :         ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
     323           0 :         if (ret)
     324             :                 return ret;
     325             : 
     326           0 :         if (num_pages == 1 && ttm->caching == ttm_cached) {
     327             :                 /*
     328             :                  * We're mapping a single page, and the desired
     329             :                  * page protection is consistent with the bo.
     330             :                  */
     331             : 
     332           0 :                 map->bo_kmap_type = ttm_bo_map_kmap;
     333           0 :                 map->page = ttm->pages[start_page];
     334           0 :                 map->virtual = kmap(map->page);
     335             :         } else {
     336             :                 /*
     337             :                  * We need to use vmap to get the desired page protection
     338             :                  * or to make the buffer object look contiguous.
     339             :                  */
     340           0 :                 prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
     341           0 :                 map->bo_kmap_type = ttm_bo_map_vmap;
     342           0 :                 map->virtual = vmap(ttm->pages + start_page, num_pages,
     343             :                                     0, prot);
     344             :         }
     345           0 :         return (!map->virtual) ? -ENOMEM : 0;
     346             : }
     347             : 
     348           0 : int ttm_bo_kmap(struct ttm_buffer_object *bo,
     349             :                 unsigned long start_page, unsigned long num_pages,
     350             :                 struct ttm_bo_kmap_obj *map)
     351             : {
     352             :         unsigned long offset, size;
     353             :         int ret;
     354             : 
     355           0 :         map->virtual = NULL;
     356           0 :         map->bo = bo;
     357           0 :         if (num_pages > bo->resource->num_pages)
     358             :                 return -EINVAL;
     359           0 :         if ((start_page + num_pages) > bo->resource->num_pages)
     360             :                 return -EINVAL;
     361             : 
     362           0 :         ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
     363           0 :         if (ret)
     364             :                 return ret;
     365           0 :         if (!bo->resource->bus.is_iomem) {
     366           0 :                 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
     367             :         } else {
     368           0 :                 offset = start_page << PAGE_SHIFT;
     369           0 :                 size = num_pages << PAGE_SHIFT;
     370           0 :                 return ttm_bo_ioremap(bo, offset, size, map);
     371             :         }
     372             : }
     373             : EXPORT_SYMBOL(ttm_bo_kmap);
     374             : 
     375           0 : void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
     376             : {
     377           0 :         if (!map->virtual)
     378             :                 return;
     379           0 :         switch (map->bo_kmap_type) {
     380             :         case ttm_bo_map_iomap:
     381           0 :                 iounmap(map->virtual);
     382           0 :                 break;
     383             :         case ttm_bo_map_vmap:
     384           0 :                 vunmap(map->virtual);
     385           0 :                 break;
     386             :         case ttm_bo_map_kmap:
     387             :                 kunmap(map->page);
     388             :                 break;
     389             :         case ttm_bo_map_premapped:
     390             :                 break;
     391             :         default:
     392           0 :                 BUG();
     393             :         }
     394           0 :         ttm_mem_io_free(map->bo->bdev, map->bo->resource);
     395           0 :         map->virtual = NULL;
     396           0 :         map->page = NULL;
     397             : }
     398             : EXPORT_SYMBOL(ttm_bo_kunmap);
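
ttm_bo_kmap()/ttm_bo_kunmap() give page-granular CPU access and may return either a system or an iomem pointer, which ttm_kmap_obj_virtual() distinguishes. A minimal sketch follows, assuming the caller holds the bo's reservation and the bo is idle as required; the example_ name is an assumption.

    /* Hypothetical: read the first dword of a buffer object through a kmap. */
    static int example_read_first_dword(struct ttm_buffer_object *bo, u32 *out)
    {
            struct ttm_bo_kmap_obj map;
            bool is_iomem;
            void *virt;
            int ret;

            ret = ttm_bo_kmap(bo, 0, 1, &map);      /* map page 0 only */
            if (ret)
                    return ret;

            virt = ttm_kmap_obj_virtual(&map, &is_iomem);
            if (is_iomem)
                    memcpy_fromio(out, (void __iomem *)virt, sizeof(*out));
            else
                    memcpy(out, virt, sizeof(*out));

            ttm_bo_kunmap(&map);
            return 0;
    }
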
     399             : 
     400           0 : int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
     401             : {
     402           0 :         struct ttm_resource *mem = bo->resource;
     403             :         int ret;
     404             : 
     405           0 :         ret = ttm_mem_io_reserve(bo->bdev, mem);
     406           0 :         if (ret)
     407             :                 return ret;
     408             : 
     409           0 :         if (mem->bus.is_iomem) {
     410             :                 void __iomem *vaddr_iomem;
     411             : 
     412           0 :                 if (mem->bus.addr)
     413             :                         vaddr_iomem = (void __iomem *)mem->bus.addr;
     414           0 :                 else if (mem->bus.caching == ttm_write_combined)
     415           0 :                         vaddr_iomem = ioremap_wc(mem->bus.offset,
     416             :                                                  bo->base.size);
     417             : #ifdef CONFIG_X86
     418             :                 else if (mem->bus.caching == ttm_cached)
     419             :                         vaddr_iomem = ioremap_cache(mem->bus.offset,
     420             :                                                   bo->base.size);
     421             : #endif
     422             :                 else
     423           0 :                         vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
     424             : 
     425           0 :                 if (!vaddr_iomem)
     426             :                         return -ENOMEM;
     427             : 
     428             :                 iosys_map_set_vaddr_iomem(map, vaddr_iomem);
     429             : 
     430             :         } else {
     431           0 :                 struct ttm_operation_ctx ctx = {
     432             :                         .interruptible = false,
     433             :                         .no_wait_gpu = false
     434             :                 };
     435           0 :                 struct ttm_tt *ttm = bo->ttm;
     436             :                 pgprot_t prot;
     437             :                 void *vaddr;
     438             : 
     439           0 :                 ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
     440           0 :                 if (ret)
     441           0 :                         return ret;
     442             : 
     443             :                 /*
     444             :                  * We need to use vmap to get the desired page protection
     445             :                  * or to make the buffer object look contiguous.
     446             :                  */
     447           0 :                 prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
     448           0 :                 vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
     449           0 :                 if (!vaddr)
     450             :                         return -ENOMEM;
     451             : 
     452           0 :                 iosys_map_set_vaddr(map, vaddr);
     453             :         }
     454             : 
     455             :         return 0;
     456             : }
     457             : EXPORT_SYMBOL(ttm_bo_vmap);
     458             : 
     459           0 : void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
     460             : {
     461           0 :         struct ttm_resource *mem = bo->resource;
     462             : 
     463           0 :         if (iosys_map_is_null(map))
     464             :                 return;
     465             : 
     466           0 :         if (!map->is_iomem)
     467           0 :                 vunmap(map->vaddr);
     468           0 :         else if (!mem->bus.addr)
     469           0 :                 iounmap(map->vaddr_iomem);
     470           0 :         iosys_map_clear(map);
     471             : 
     472           0 :         ttm_mem_io_free(bo->bdev, bo->resource);
     473             : }
     474             : EXPORT_SYMBOL(ttm_bo_vunmap);
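
ttm_bo_vmap()/ttm_bo_vunmap() map the whole object behind a struct iosys_map, which hides the iomem/system distinction from the caller. A minimal sketch, assuming the caller handles bo locking; the example_ name is hypothetical and iosys_map_memset() is the generic helper from <linux/iosys-map.h>.

    /* Hypothetical: fill an entire buffer object through an iosys_map. */
    static int example_fill_bo(struct ttm_buffer_object *bo, u8 value)
    {
            struct iosys_map map;
            int ret;

            ret = ttm_bo_vmap(bo, &map);
            if (ret)
                    return ret;

            iosys_map_memset(&map, 0, value, bo->base.size);

            ttm_bo_vunmap(bo, &map);
            return 0;
    }
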
     475             : 
     476           0 : static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
     477             :                                  bool dst_use_tt)
     478             : {
     479             :         int ret;
     480           0 :         ret = ttm_bo_wait(bo, false, false);
     481           0 :         if (ret)
     482             :                 return ret;
     483             : 
     484           0 :         if (!dst_use_tt)
     485           0 :                 ttm_bo_tt_destroy(bo);
     486           0 :         ttm_resource_free(bo, &bo->resource);
     487           0 :         return 0;
     488             : }
     489             : 
     490           0 : static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
     491             :                                 struct dma_fence *fence,
     492             :                                 bool dst_use_tt)
     493             : {
     494             :         struct ttm_buffer_object *ghost_obj;
     495             :         int ret;
     496             : 
     497             :         /**
     498             :          * This should help pipeline ordinary buffer moves.
     499             :          *
     500             :          * Hang old buffer memory on a new buffer object,
     501             :          * and leave it to be released when the GPU
     502             :          * operation has completed.
     503             :          */
     504             : 
     505           0 :         ret = ttm_buffer_object_transfer(bo, &ghost_obj);
     506           0 :         if (ret)
     507             :                 return ret;
     508             : 
     509           0 :         dma_resv_add_fence(&ghost_obj->base._resv, fence,
     510             :                            DMA_RESV_USAGE_KERNEL);
     511             : 
     512             :         /**
     513             :          * If we're not moving to fixed memory, the TTM object
      514             :  * needs to stay alive. Otherwise hang it on the ghost
     515             :          * bo to be unbound and destroyed.
     516             :          */
     517             : 
     518           0 :         if (dst_use_tt)
     519           0 :                 ghost_obj->ttm = NULL;
     520             :         else
     521           0 :                 bo->ttm = NULL;
     522             : 
     523           0 :         dma_resv_unlock(&ghost_obj->base._resv);
     524           0 :         ttm_bo_put(ghost_obj);
     525           0 :         return 0;
     526             : }
     527             : 
     528           0 : static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
     529             :                                        struct dma_fence *fence)
     530             : {
     531           0 :         struct ttm_device *bdev = bo->bdev;
     532             :         struct ttm_resource_manager *from;
     533             : 
     534           0 :         from = ttm_manager_type(bdev, bo->resource->mem_type);
     535             : 
     536             :         /**
      537             :  * BO doesn't have a TTM that we need to bind/unbind. Just remember
      538             :  * this eviction and free up the allocation.
     539             :          */
     540           0 :         spin_lock(&from->move_lock);
     541           0 :         if (!from->move || dma_fence_is_later(fence, from->move)) {
     542           0 :                 dma_fence_put(from->move);
     543           0 :                 from->move = dma_fence_get(fence);
     544             :         }
     545           0 :         spin_unlock(&from->move_lock);
     546             : 
     547           0 :         ttm_resource_free(bo, &bo->resource);
     548           0 : }
     549             : 
     550           0 : int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
     551             :                               struct dma_fence *fence,
     552             :                               bool evict,
     553             :                               bool pipeline,
     554             :                               struct ttm_resource *new_mem)
     555             : {
     556           0 :         struct ttm_device *bdev = bo->bdev;
     557           0 :         struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
     558           0 :         struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
     559           0 :         int ret = 0;
     560             : 
     561           0 :         dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
     562           0 :         if (!evict)
     563           0 :                 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
     564           0 :         else if (!from->use_tt && pipeline)
     565           0 :                 ttm_bo_move_pipeline_evict(bo, fence);
     566             :         else
     567           0 :                 ret = ttm_bo_wait_free_node(bo, man->use_tt);
     568             : 
     569           0 :         if (ret)
     570             :                 return ret;
     571             : 
     572           0 :         ttm_bo_assign_mem(bo, new_mem);
     573             : 
     574           0 :         return 0;
     575             : }
     576             : EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
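
After a driver has queued a hardware copy and obtained its fence, handing the fence and the new placement to ttm_bo_move_accel_cleanup() lets TTM pipeline the cleanup via the ghost-object mechanism above. A minimal sketch of that tail end of an accelerated move; the example_ name is an assumption.

    /* Hypothetical: finish a hardware-accelerated move with its fence. */
    static int example_finish_hw_move(struct ttm_buffer_object *bo, bool evict,
                                      struct ttm_resource *new_mem,
                                      struct dma_fence *copy_fence)
    {
            int ret;

            /* Non-pipelined here for simplicity; drivers often pass pipeline=true. */
            ret = ttm_bo_move_accel_cleanup(bo, copy_fence, evict, false, new_mem);
            dma_fence_put(copy_fence);

            return ret;
    }
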
     577             : 
     578           0 : void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
     579             :                               struct ttm_resource *new_mem)
     580             : {
     581           0 :         struct ttm_device *bdev = bo->bdev;
     582           0 :         struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
     583             :         int ret;
     584             : 
     585           0 :         ret = ttm_bo_wait_free_node(bo, man->use_tt);
     586           0 :         if (WARN_ON(ret))
     587             :                 return;
     588             : 
     589           0 :         ttm_bo_assign_mem(bo, new_mem);
     590             : }
     591             : EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
     592             : 
     593             : /**
     594             :  * ttm_bo_pipeline_gutting - purge the contents of a bo
     595             :  * @bo: The buffer object
     596             :  *
     597             :  * Purge the contents of a bo, async if the bo is not idle.
     598             :  * After a successful call, the bo is left unpopulated in
      599             :  * system placement. The function may wait uninterruptibly
      600             :  * for idle on OOM.
     601             :  *
     602             :  * Return: 0 if successful, negative error code on failure.
     603             :  */
     604           0 : int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
     605             : {
     606             :         static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
     607             :         struct ttm_buffer_object *ghost;
     608             :         struct ttm_resource *sys_res;
     609             :         struct ttm_tt *ttm;
     610             :         int ret;
     611             : 
     612           0 :         ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
     613           0 :         if (ret)
     614             :                 return ret;
     615             : 
     616             :         /* If already idle, no need for ghost object dance. */
     617           0 :         ret = ttm_bo_wait(bo, false, true);
     618           0 :         if (ret != -EBUSY) {
     619           0 :                 if (!bo->ttm) {
     620             :                         /* See comment below about clearing. */
     621           0 :                         ret = ttm_tt_create(bo, true);
     622           0 :                         if (ret)
     623             :                                 goto error_free_sys_mem;
     624             :                 } else {
     625           0 :                         ttm_tt_unpopulate(bo->bdev, bo->ttm);
     626           0 :                         if (bo->type == ttm_bo_type_device)
     627           0 :                                 ttm_tt_mark_for_clear(bo->ttm);
     628             :                 }
     629           0 :                 ttm_resource_free(bo, &bo->resource);
     630           0 :                 ttm_bo_assign_mem(bo, sys_res);
     631           0 :                 return 0;
     632             :         }
     633             : 
     634             :         /*
     635             :          * We need an unpopulated ttm_tt after giving our current one,
     636             :          * if any, to the ghost object. And we can't afford to fail
     637             :          * creating one *after* the operation. If the bo subsequently gets
     638             :          * resurrected, make sure it's cleared (if ttm_bo_type_device)
     639             :          * to avoid leaking sensitive information to user-space.
     640             :          */
     641             : 
     642           0 :         ttm = bo->ttm;
     643           0 :         bo->ttm = NULL;
     644           0 :         ret = ttm_tt_create(bo, true);
     645           0 :         swap(bo->ttm, ttm);
     646           0 :         if (ret)
     647             :                 goto error_free_sys_mem;
     648             : 
     649           0 :         ret = ttm_buffer_object_transfer(bo, &ghost);
     650           0 :         if (ret)
     651             :                 goto error_destroy_tt;
     652             : 
     653           0 :         ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
     654             :         /* Last resort, wait for the BO to be idle when we are OOM */
     655           0 :         if (ret)
     656           0 :                 ttm_bo_wait(bo, false, false);
     657             : 
     658           0 :         dma_resv_unlock(&ghost->base._resv);
     659           0 :         ttm_bo_put(ghost);
     660           0 :         bo->ttm = ttm;
     661           0 :         ttm_bo_assign_mem(bo, sys_res);
     662           0 :         return 0;
     663             : 
     664             : error_destroy_tt:
     665           0 :         ttm_tt_destroy(bo->bdev, ttm);
     666             : 
     667             : error_free_sys_mem:
     668           0 :         ttm_resource_free(bo, &sys_res);
     669           0 :         return ret;
     670             : }
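
ttm_bo_pipeline_gutting() is used by the TTM core, for example from ttm_bo_validate() when a bo is validated with an empty placement, to drop a bo's backing store without stalling on the GPU if possible. A minimal sketch of such a purge path, assuming the caller already holds the bo's reservation; the example_ name is hypothetical.

    /* Hypothetical purge path: throw away the backing store of a reserved bo. */
    static int example_purge_bo(struct ttm_buffer_object *bo)
    {
            int ret;

            dma_resv_assert_held(bo->base.resv);

            ret = ttm_bo_pipeline_gutting(bo);
            if (ret)
                    return ret;

            /* The bo is now unpopulated in system placement; mark it purged. */
            return 0;
    }
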

Generated by: LCOV version 1.14