LCOV - code coverage report
Current view: top level - drivers/gpu/drm - drm_prime.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
                 Hit   Total   Coverage
Lines:             0     290      0.0 %
Functions:         0      27      0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright © 2012 Red Hat
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice (including the next
      12             :  * paragraph) shall be included in all copies or substantial portions of the
      13             :  * Software.
      14             :  *
      15             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      16             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      17             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      18             :  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      19             :  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
      20             :  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
      21             :  * IN THE SOFTWARE.
      22             :  *
      23             :  * Authors:
      24             :  *      Dave Airlie <airlied@redhat.com>
      25             :  *      Rob Clark <rob.clark@linaro.org>
      26             :  *
      27             :  */
      28             : 
      29             : #include <linux/export.h>
      30             : #include <linux/dma-buf.h>
      31             : #include <linux/rbtree.h>
      32             : #include <linux/module.h>
      33             : 
      34             : #include <drm/drm.h>
      35             : #include <drm/drm_drv.h>
      36             : #include <drm/drm_file.h>
      37             : #include <drm/drm_framebuffer.h>
      38             : #include <drm/drm_gem.h>
      39             : #include <drm/drm_prime.h>
      40             : 
      41             : #include "drm_internal.h"
      42             : 
      43             : MODULE_IMPORT_NS(DMA_BUF);
      44             : 
      45             : /**
      46             :  * DOC: overview and lifetime rules
      47             :  *
      48             :  * Similar to GEM global names, PRIME file descriptors are also used to share
      49             :  * buffer objects across processes. They offer additional security: as file
      50             :  * descriptors must be explicitly sent over UNIX domain sockets to be shared
      51             :  * between applications, they can't be guessed like the globally unique GEM
      52             :  * names.
      53             :  *
      54             :  * Drivers that support the PRIME API implement the
      55             :  * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
      56             :  * GEM based drivers must use drm_gem_prime_handle_to_fd() and
      57             :  * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
       58             :  * actual driver interface is provided through the &drm_gem_object_funcs.export
      59             :  * and &drm_driver.gem_prime_import hooks.
      60             :  *
      61             :  * &dma_buf_ops implementations for GEM drivers are all individually exported
      62             :  * for drivers which need to overwrite or reimplement some of them.
      63             :  *
      64             :  * Reference Counting for GEM Drivers
      65             :  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      66             :  *
       67             :  * On export, the &dma_buf holds a reference to the exported buffer object,
      68             :  * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
      69             :  * IOCTL, when it first calls &drm_gem_object_funcs.export
      70             :  * and stores the exporting GEM object in the &dma_buf.priv field. This
      71             :  * reference needs to be released when the final reference to the &dma_buf
      72             :  * itself is dropped and its &dma_buf_ops.release function is called.  For
      73             :  * GEM-based drivers, the &dma_buf should be exported using
      74             :  * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
      75             :  *
      76             :  * Thus the chain of references always flows in one direction, avoiding loops:
       77             :  * importing GEM object -> dma-buf -> exported GEM bo. Further complications
      78             :  * are the lookup caches for import and export. These are required to guarantee
      79             :  * that any given object will always have only one unique userspace handle. This
      80             :  * is required to allow userspace to detect duplicated imports, since some GEM
      81             :  * drivers do fail command submissions if a given buffer object is listed more
      82             :  * than once. These import and export caches in &drm_prime_file_private only
      83             :  * retain a weak reference, which is cleaned up when the corresponding object is
      84             :  * released.
      85             :  *
      86             :  * Self-importing: If userspace is using PRIME as a replacement for flink then
       87             :  * it will get an fd->handle request for a GEM object that it created.  Drivers
       88             :  * should detect this situation and return the underlying object from the
      89             :  * dma-buf private. For GEM based drivers this is handled in
      90             :  * drm_gem_prime_import() already.
      91             :  */
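
As a minimal sketch of the wiring described above (the foo_* driver name is an assumption, not taken from this file), a GEM driver typically forwards the PRIME entry points to the generic helpers in its &drm_driver:

	static const struct drm_driver foo_drm_driver = {
		.driver_features	= DRIVER_GEM,
		/* PRIME entry points, forwarded to the generic GEM helpers */
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		/* drm_gem_prime_import() is also the fallback when this is left NULL */
		.gem_prime_import	= drm_gem_prime_import,
		/* needed by drm_gem_dmabuf_mmap() further below */
		.gem_prime_mmap		= drm_gem_prime_mmap,
	};
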
      92             : 
      93             : struct drm_prime_member {
      94             :         struct dma_buf *dma_buf;
      95             :         uint32_t handle;
      96             : 
      97             :         struct rb_node dmabuf_rb;
      98             :         struct rb_node handle_rb;
      99             : };
     100             : 
     101           0 : static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
     102             :                                     struct dma_buf *dma_buf, uint32_t handle)
     103             : {
     104             :         struct drm_prime_member *member;
     105             :         struct rb_node **p, *rb;
     106             : 
     107           0 :         member = kmalloc(sizeof(*member), GFP_KERNEL);
     108           0 :         if (!member)
     109             :                 return -ENOMEM;
     110             : 
     111           0 :         get_dma_buf(dma_buf);
     112           0 :         member->dma_buf = dma_buf;
     113           0 :         member->handle = handle;
     114             : 
     115           0 :         rb = NULL;
     116           0 :         p = &prime_fpriv->dmabufs.rb_node;
     117           0 :         while (*p) {
     118             :                 struct drm_prime_member *pos;
     119             : 
     120           0 :                 rb = *p;
     121           0 :                 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
     122           0 :                 if (dma_buf > pos->dma_buf)
     123           0 :                         p = &rb->rb_right;
     124             :                 else
     125           0 :                         p = &rb->rb_left;
     126             :         }
     127           0 :         rb_link_node(&member->dmabuf_rb, rb, p);
     128           0 :         rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
     129             : 
     130           0 :         rb = NULL;
     131           0 :         p = &prime_fpriv->handles.rb_node;
     132           0 :         while (*p) {
     133             :                 struct drm_prime_member *pos;
     134             : 
     135           0 :                 rb = *p;
     136           0 :                 pos = rb_entry(rb, struct drm_prime_member, handle_rb);
     137           0 :                 if (handle > pos->handle)
     138           0 :                         p = &rb->rb_right;
     139             :                 else
     140           0 :                         p = &rb->rb_left;
     141             :         }
     142           0 :         rb_link_node(&member->handle_rb, rb, p);
     143           0 :         rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
     144             : 
     145           0 :         return 0;
     146             : }
     147             : 
     148             : static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
     149             :                                                       uint32_t handle)
     150             : {
     151             :         struct rb_node *rb;
     152             : 
     153           0 :         rb = prime_fpriv->handles.rb_node;
     154           0 :         while (rb) {
     155             :                 struct drm_prime_member *member;
     156             : 
     157           0 :                 member = rb_entry(rb, struct drm_prime_member, handle_rb);
     158           0 :                 if (member->handle == handle)
     159           0 :                         return member->dma_buf;
     160           0 :                 else if (member->handle < handle)
     161           0 :                         rb = rb->rb_right;
     162             :                 else
     163           0 :                         rb = rb->rb_left;
     164             :         }
     165             : 
     166             :         return NULL;
     167             : }
     168             : 
     169             : static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
     170             :                                        struct dma_buf *dma_buf,
     171             :                                        uint32_t *handle)
     172             : {
     173             :         struct rb_node *rb;
     174             : 
     175           0 :         rb = prime_fpriv->dmabufs.rb_node;
     176           0 :         while (rb) {
     177             :                 struct drm_prime_member *member;
     178             : 
     179           0 :                 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
     180           0 :                 if (member->dma_buf == dma_buf) {
     181           0 :                         *handle = member->handle;
     182             :                         return 0;
     183           0 :                 } else if (member->dma_buf < dma_buf) {
     184           0 :                         rb = rb->rb_right;
     185             :                 } else {
     186           0 :                         rb = rb->rb_left;
     187             :                 }
     188             :         }
     189             : 
     190             :         return -ENOENT;
     191             : }
     192             : 
     193           0 : void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
     194             :                                         struct dma_buf *dma_buf)
     195             : {
     196             :         struct rb_node *rb;
     197             : 
     198           0 :         rb = prime_fpriv->dmabufs.rb_node;
     199           0 :         while (rb) {
     200             :                 struct drm_prime_member *member;
     201             : 
     202           0 :                 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
     203           0 :                 if (member->dma_buf == dma_buf) {
     204           0 :                         rb_erase(&member->handle_rb, &prime_fpriv->handles);
     205           0 :                         rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
     206             : 
     207           0 :                         dma_buf_put(dma_buf);
     208           0 :                         kfree(member);
     209           0 :                         return;
     210           0 :                 } else if (member->dma_buf < dma_buf) {
     211           0 :                         rb = rb->rb_right;
     212             :                 } else {
     213           0 :                         rb = rb->rb_left;
     214             :                 }
     215             :         }
     216             : }
     217             : 
     218           0 : void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
     219             : {
     220           0 :         mutex_init(&prime_fpriv->lock);
     221           0 :         prime_fpriv->dmabufs = RB_ROOT;
     222           0 :         prime_fpriv->handles = RB_ROOT;
     223           0 : }
     224             : 
     225           0 : void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
     226             : {
     227             :         /* by now drm_gem_release should've made sure the list is empty */
     228           0 :         WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
     229           0 : }
     230             : 
     231             : /**
     232             :  * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
     233             :  * @dev: parent device for the exported dmabuf
     234             :  * @exp_info: the export information used by dma_buf_export()
     235             :  *
     236             :  * This wraps dma_buf_export() for use by generic GEM drivers that are using
     237             :  * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
     238             :  * a reference to the &drm_device and the exported &drm_gem_object (stored in
      239             :  * &dma_buf_export_info.priv) which are released by drm_gem_dmabuf_release().
     240             :  *
     241             :  * Returns the new dmabuf.
     242             :  */
     243           0 : struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
     244             :                                       struct dma_buf_export_info *exp_info)
     245             : {
     246           0 :         struct drm_gem_object *obj = exp_info->priv;
     247             :         struct dma_buf *dma_buf;
     248             : 
     249           0 :         dma_buf = dma_buf_export(exp_info);
     250           0 :         if (IS_ERR(dma_buf))
     251             :                 return dma_buf;
     252             : 
     253           0 :         drm_dev_get(dev);
     254           0 :         drm_gem_object_get(obj);
     255           0 :         dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
     256             : 
     257           0 :         return dma_buf;
     258             : }
     259             : EXPORT_SYMBOL(drm_gem_dmabuf_export);
     260             : 
     261             : /**
     262             :  * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
     263             :  * @dma_buf: buffer to be released
     264             :  *
     265             :  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
     266             :  * must use this in their &dma_buf_ops structure as the release callback.
     267             :  * drm_gem_dmabuf_release() should be used in conjunction with
     268             :  * drm_gem_dmabuf_export().
     269             :  */
     270           0 : void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
     271             : {
     272           0 :         struct drm_gem_object *obj = dma_buf->priv;
     273           0 :         struct drm_device *dev = obj->dev;
     274             : 
     275             :         /* drop the reference on the export fd holds */
     276           0 :         drm_gem_object_put(obj);
     277             : 
     278           0 :         drm_dev_put(dev);
     279           0 : }
     280             : EXPORT_SYMBOL(drm_gem_dmabuf_release);
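
The overview above notes that the &dma_buf_ops implementations are individually exported so drivers can override a subset of them; a hypothetical driver-private ops table (foo_dmabuf_mmap is an assumed driver function) might look like this sketch:

	static const struct dma_buf_ops foo_dmabuf_ops = {
		.cache_sgt_mapping = true,
		.attach = drm_gem_map_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = drm_gem_map_dma_buf,
		.unmap_dma_buf = drm_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		/* driver-specific mmap instead of drm_gem_dmabuf_mmap() */
		.mmap = foo_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	};
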
     281             : 
     282             : /**
     283             :  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
      284             :  * @dev: drm_device to import the buffer into
     285             :  * @file_priv: drm file-private structure
     286             :  * @prime_fd: fd id of the dma-buf which should be imported
     287             :  * @handle: pointer to storage for the handle of the imported buffer object
     288             :  *
      289             :  * This is the PRIME import function which GEM drivers must use to ensure
      290             :  * correct lifetime management of the underlying GEM object.
      291             :  * The actual importing of the GEM object from the dma-buf is done through the
     292             :  * &drm_driver.gem_prime_import driver callback.
     293             :  *
     294             :  * Returns 0 on success or a negative error code on failure.
     295             :  */
     296           0 : int drm_gem_prime_fd_to_handle(struct drm_device *dev,
     297             :                                struct drm_file *file_priv, int prime_fd,
     298             :                                uint32_t *handle)
     299             : {
     300             :         struct dma_buf *dma_buf;
     301             :         struct drm_gem_object *obj;
     302             :         int ret;
     303             : 
     304           0 :         dma_buf = dma_buf_get(prime_fd);
     305           0 :         if (IS_ERR(dma_buf))
     306           0 :                 return PTR_ERR(dma_buf);
     307             : 
     308           0 :         mutex_lock(&file_priv->prime.lock);
     309             : 
     310           0 :         ret = drm_prime_lookup_buf_handle(&file_priv->prime,
     311             :                         dma_buf, handle);
     312           0 :         if (ret == 0)
     313             :                 goto out_put;
     314             : 
     315             :         /* never seen this one, need to import */
     316           0 :         mutex_lock(&dev->object_name_lock);
     317           0 :         if (dev->driver->gem_prime_import)
     318           0 :                 obj = dev->driver->gem_prime_import(dev, dma_buf);
     319             :         else
     320           0 :                 obj = drm_gem_prime_import(dev, dma_buf);
     321           0 :         if (IS_ERR(obj)) {
     322           0 :                 ret = PTR_ERR(obj);
     323             :                 goto out_unlock;
     324             :         }
     325             : 
     326           0 :         if (obj->dma_buf) {
     327           0 :                 WARN_ON(obj->dma_buf != dma_buf);
     328             :         } else {
     329           0 :                 obj->dma_buf = dma_buf;
     330           0 :                 get_dma_buf(dma_buf);
     331             :         }
     332             : 
     333             :         /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
     334           0 :         ret = drm_gem_handle_create_tail(file_priv, obj, handle);
     335           0 :         drm_gem_object_put(obj);
     336           0 :         if (ret)
     337             :                 goto out_put;
     338             : 
     339           0 :         ret = drm_prime_add_buf_handle(&file_priv->prime,
     340             :                         dma_buf, *handle);
     341           0 :         mutex_unlock(&file_priv->prime.lock);
     342           0 :         if (ret)
     343             :                 goto fail;
     344             : 
     345           0 :         dma_buf_put(dma_buf);
     346             : 
     347           0 :         return 0;
     348             : 
     349             : fail:
      350             :         /* If a driver has attached, we rely on the free-object path
      351             :          * to detach, which seems fine.
     352             :          */
     353           0 :         drm_gem_handle_delete(file_priv, *handle);
     354           0 :         dma_buf_put(dma_buf);
     355           0 :         return ret;
     356             : 
     357             : out_unlock:
     358           0 :         mutex_unlock(&dev->object_name_lock);
     359             : out_put:
     360           0 :         mutex_unlock(&file_priv->prime.lock);
     361           0 :         dma_buf_put(dma_buf);
     362           0 :         return ret;
     363             : }
     364             : EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
     365             : 
     366           0 : int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
     367             :                                  struct drm_file *file_priv)
     368             : {
     369           0 :         struct drm_prime_handle *args = data;
     370             : 
     371           0 :         if (!dev->driver->prime_fd_to_handle)
     372             :                 return -ENOSYS;
     373             : 
     374           0 :         return dev->driver->prime_fd_to_handle(dev, file_priv,
     375           0 :                         args->fd, &args->handle);
     376             : }
     377             : 
     378           0 : static struct dma_buf *export_and_register_object(struct drm_device *dev,
     379             :                                                   struct drm_gem_object *obj,
     380             :                                                   uint32_t flags)
     381             : {
     382             :         struct dma_buf *dmabuf;
     383             : 
     384             :         /* prevent races with concurrent gem_close. */
     385           0 :         if (obj->handle_count == 0) {
     386             :                 dmabuf = ERR_PTR(-ENOENT);
     387             :                 return dmabuf;
     388             :         }
     389             : 
     390           0 :         if (obj->funcs && obj->funcs->export)
     391           0 :                 dmabuf = obj->funcs->export(obj, flags);
     392             :         else
     393           0 :                 dmabuf = drm_gem_prime_export(obj, flags);
     394           0 :         if (IS_ERR(dmabuf)) {
     395             :                 /* normally the created dma-buf takes ownership of the ref,
     396             :                  * but if that fails then drop the ref
     397             :                  */
     398             :                 return dmabuf;
     399             :         }
     400             : 
     401             :         /*
     402             :          * Note that callers do not need to clean up the export cache
     403             :          * since the check for obj->handle_count guarantees that someone
     404             :          * will clean it up.
     405             :          */
     406           0 :         obj->dma_buf = dmabuf;
     407           0 :         get_dma_buf(obj->dma_buf);
     408             : 
     409             :         return dmabuf;
     410             : }
     411             : 
     412             : /**
     413             :  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
     414             :  * @dev: dev to export the buffer from
     415             :  * @file_priv: drm file-private structure
     416             :  * @handle: buffer handle to export
     417             :  * @flags: flags like DRM_CLOEXEC
      418             :  * @prime_fd: pointer to storage for the fd id of the created dma-buf
     419             :  *
      420             :  * This is the PRIME export function which GEM drivers must use to ensure
      421             :  * correct lifetime management of the underlying GEM object.
     422             :  * The actual exporting from GEM object to a dma-buf is done through the
     423             :  * &drm_gem_object_funcs.export callback.
     424             :  */
     425           0 : int drm_gem_prime_handle_to_fd(struct drm_device *dev,
     426             :                                struct drm_file *file_priv, uint32_t handle,
     427             :                                uint32_t flags,
     428             :                                int *prime_fd)
     429             : {
     430             :         struct drm_gem_object *obj;
     431           0 :         int ret = 0;
     432             :         struct dma_buf *dmabuf;
     433             : 
     434           0 :         mutex_lock(&file_priv->prime.lock);
     435           0 :         obj = drm_gem_object_lookup(file_priv, handle);
     436           0 :         if (!obj)  {
     437             :                 ret = -ENOENT;
     438             :                 goto out_unlock;
     439             :         }
     440             : 
     441           0 :         dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
     442           0 :         if (dmabuf) {
     443             :                 get_dma_buf(dmabuf);
     444             :                 goto out_have_handle;
     445             :         }
     446             : 
     447           0 :         mutex_lock(&dev->object_name_lock);
     448             :         /* re-export the original imported object */
     449           0 :         if (obj->import_attach) {
     450           0 :                 dmabuf = obj->import_attach->dmabuf;
     451           0 :                 get_dma_buf(dmabuf);
     452             :                 goto out_have_obj;
     453             :         }
     454             : 
     455           0 :         if (obj->dma_buf) {
     456           0 :                 get_dma_buf(obj->dma_buf);
     457           0 :                 dmabuf = obj->dma_buf;
     458           0 :                 goto out_have_obj;
     459             :         }
     460             : 
     461           0 :         dmabuf = export_and_register_object(dev, obj, flags);
     462           0 :         if (IS_ERR(dmabuf)) {
     463             :                 /* normally the created dma-buf takes ownership of the ref,
     464             :                  * but if that fails then drop the ref
     465             :                  */
     466           0 :                 ret = PTR_ERR(dmabuf);
     467           0 :                 mutex_unlock(&dev->object_name_lock);
     468           0 :                 goto out;
     469             :         }
     470             : 
     471             : out_have_obj:
     472             :         /*
     473             :          * If we've exported this buffer then cheat and add it to the import list
     474             :          * so we get the correct handle back. We must do this under the
     475             :          * protection of dev->object_name_lock to ensure that a racing gem close
      476             :  * ioctl doesn't fail to remove this buffer handle from the cache.
     477             :          */
     478           0 :         ret = drm_prime_add_buf_handle(&file_priv->prime,
     479             :                                        dmabuf, handle);
     480           0 :         mutex_unlock(&dev->object_name_lock);
     481           0 :         if (ret)
     482             :                 goto fail_put_dmabuf;
     483             : 
     484             : out_have_handle:
     485           0 :         ret = dma_buf_fd(dmabuf, flags);
     486             :         /*
     487             :          * We must _not_ remove the buffer from the handle cache since the newly
     488             :          * created dma buf is already linked in the global obj->dma_buf pointer,
     489             :          * and that is invariant as long as a userspace gem handle exists.
     490             :          * Closing the handle will clean out the cache anyway, so we don't leak.
     491             :          */
     492           0 :         if (ret < 0) {
     493             :                 goto fail_put_dmabuf;
     494             :         } else {
     495           0 :                 *prime_fd = ret;
     496           0 :                 ret = 0;
     497             :         }
     498             : 
     499           0 :         goto out;
     500             : 
     501             : fail_put_dmabuf:
     502           0 :         dma_buf_put(dmabuf);
     503             : out:
     504             :         drm_gem_object_put(obj);
     505             : out_unlock:
     506           0 :         mutex_unlock(&file_priv->prime.lock);
     507             : 
     508           0 :         return ret;
     509             : }
     510             : EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
     511             : 
     512           0 : int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
     513             :                                  struct drm_file *file_priv)
     514             : {
     515           0 :         struct drm_prime_handle *args = data;
     516             : 
     517           0 :         if (!dev->driver->prime_handle_to_fd)
     518             :                 return -ENOSYS;
     519             : 
     520             :         /* check flags are valid */
     521           0 :         if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
     522             :                 return -EINVAL;
     523             : 
     524           0 :         return dev->driver->prime_handle_to_fd(dev, file_priv,
     525           0 :                         args->handle, args->flags, &args->fd);
     526             : }
     527             : 
     528             : /**
     529             :  * DOC: PRIME Helpers
     530             :  *
     531             :  * Drivers can implement &drm_gem_object_funcs.export and
     532             :  * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
     533             :  * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
     534             :  * implement dma-buf support in terms of some lower-level helpers, which are
     535             :  * again exported for drivers to use individually:
     536             :  *
     537             :  * Exporting buffers
     538             :  * ~~~~~~~~~~~~~~~~~
     539             :  *
     540             :  * Optional pinning of buffers is handled at dma-buf attach and detach time in
     541             :  * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
      542             :  * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
     543             :  * &drm_gem_object_funcs.get_sg_table.
     544             :  *
     545             :  * For kernel-internal access there's drm_gem_dmabuf_vmap() and
     546             :  * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
     547             :  * drm_gem_dmabuf_mmap().
     548             :  *
     549             :  * Note that these export helpers can only be used if the underlying backing
      550             :  * storage is fully coherent and either permanently pinned or safe to pin
      551             :  * indefinitely.
     552             :  *
     553             :  * FIXME: The underlying helper functions are named rather inconsistently.
     554             :  *
     555             :  * Importing buffers
     556             :  * ~~~~~~~~~~~~~~~~~
     557             :  *
     558             :  * Importing dma-bufs using drm_gem_prime_import() relies on
     559             :  * &drm_driver.gem_prime_import_sg_table.
     560             :  *
      561             :  * Note that, similarly to the export helpers, this permanently pins the
      562             :  * underlying backing storage, which is fine for scanout but not the best
      563             :  * option for sharing lots of buffers for rendering.
     564             :  */
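
Tying the helpers below together, a driver using them end to end might provide a &drm_gem_object_funcs table roughly as in this sketch (all foo_* symbols are hypothetical):

	static const struct drm_gem_object_funcs foo_gem_object_funcs = {
		.free		= foo_gem_free_object,
		.pin		= foo_gem_pin,
		.unpin		= foo_gem_unpin,
		.get_sg_table	= foo_gem_get_sg_table,
		.vmap		= foo_gem_vmap,
		.vunmap		= foo_gem_vunmap,
		/* .export is optional; drm_gem_prime_export() is the default */
	};
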
     565             : 
     566             : /**
     567             :  * drm_gem_map_attach - dma_buf attach implementation for GEM
     568             :  * @dma_buf: buffer to attach device to
     569             :  * @attach: buffer attachment data
     570             :  *
     571             :  * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
     572             :  * used as the &dma_buf_ops.attach callback. Must be used together with
     573             :  * drm_gem_map_detach().
     574             :  *
     575             :  * Returns 0 on success, negative error code on failure.
     576             :  */
     577           0 : int drm_gem_map_attach(struct dma_buf *dma_buf,
     578             :                        struct dma_buf_attachment *attach)
     579             : {
     580           0 :         struct drm_gem_object *obj = dma_buf->priv;
     581             : 
     582           0 :         return drm_gem_pin(obj);
     583             : }
     584             : EXPORT_SYMBOL(drm_gem_map_attach);
     585             : 
     586             : /**
     587             :  * drm_gem_map_detach - dma_buf detach implementation for GEM
     588             :  * @dma_buf: buffer to detach from
     589             :  * @attach: attachment to be detached
     590             :  *
      591             :  * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
     592             :  * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
     593             :  * &dma_buf_ops.detach callback.
     594             :  */
     595           0 : void drm_gem_map_detach(struct dma_buf *dma_buf,
     596             :                         struct dma_buf_attachment *attach)
     597             : {
     598           0 :         struct drm_gem_object *obj = dma_buf->priv;
     599             : 
     600           0 :         drm_gem_unpin(obj);
     601           0 : }
     602             : EXPORT_SYMBOL(drm_gem_map_detach);
     603             : 
     604             : /**
     605             :  * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
     606             :  * @attach: attachment whose scatterlist is to be returned
     607             :  * @dir: direction of DMA transfer
     608             :  *
     609             :  * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
     610             :  * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
     611             :  * with drm_gem_unmap_dma_buf().
     612             :  *
      613             :  * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
     614             :  * on error. May return -EINTR if it is interrupted by a signal.
     615             :  */
     616           0 : struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
     617             :                                      enum dma_data_direction dir)
     618             : {
     619           0 :         struct drm_gem_object *obj = attach->dmabuf->priv;
     620             :         struct sg_table *sgt;
     621             :         int ret;
     622             : 
     623           0 :         if (WARN_ON(dir == DMA_NONE))
     624             :                 return ERR_PTR(-EINVAL);
     625             : 
     626           0 :         if (WARN_ON(!obj->funcs->get_sg_table))
     627             :                 return ERR_PTR(-ENOSYS);
     628             : 
     629           0 :         sgt = obj->funcs->get_sg_table(obj);
     630           0 :         if (IS_ERR(sgt))
     631             :                 return sgt;
     632             : 
     633           0 :         ret = dma_map_sgtable(attach->dev, sgt, dir,
     634             :                               DMA_ATTR_SKIP_CPU_SYNC);
     635           0 :         if (ret) {
     636           0 :                 sg_free_table(sgt);
     637           0 :                 kfree(sgt);
     638           0 :                 sgt = ERR_PTR(ret);
     639             :         }
     640             : 
     641             :         return sgt;
     642             : }
     643             : EXPORT_SYMBOL(drm_gem_map_dma_buf);
     644             : 
     645             : /**
     646             :  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
     647             :  * @attach: attachment to unmap buffer from
     648             :  * @sgt: scatterlist info of the buffer to unmap
     649             :  * @dir: direction of DMA transfer
     650             :  *
     651             :  * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
     652             :  */
     653           0 : void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
     654             :                            struct sg_table *sgt,
     655             :                            enum dma_data_direction dir)
     656             : {
     657           0 :         if (!sgt)
     658             :                 return;
     659             : 
     660           0 :         dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
     661           0 :         sg_free_table(sgt);
     662           0 :         kfree(sgt);
     663             : }
     664             : EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
     665             : 
     666             : /**
     667             :  * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
     668             :  * @dma_buf: buffer to be mapped
     669             :  * @map: the virtual address of the buffer
     670             :  *
     671             :  * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
     672             :  * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
     673             :  * The kernel virtual address is returned in map.
     674             :  *
     675             :  * Returns 0 on success or a negative errno code otherwise.
     676             :  */
     677           0 : int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
     678             : {
     679           0 :         struct drm_gem_object *obj = dma_buf->priv;
     680             : 
     681           0 :         return drm_gem_vmap(obj, map);
     682             : }
     683             : EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
     684             : 
     685             : /**
     686             :  * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
     687             :  * @dma_buf: buffer to be unmapped
     688             :  * @map: the virtual address of the buffer
     689             :  *
     690             :  * Releases a kernel virtual mapping. This can be used as the
     691             :  * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
     692             :  */
     693           0 : void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
     694             : {
     695           0 :         struct drm_gem_object *obj = dma_buf->priv;
     696             : 
     697           0 :         drm_gem_vunmap(obj, map);
     698           0 : }
     699             : EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
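
For drivers whose buffers already have a kernel mapping, the &drm_gem_object_funcs.vmap hook backing these helpers can be as simple as the following sketch (struct foo_gem_object and its vaddr member are assumptions):

	static int foo_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
	{
		struct foo_gem_object *bo = container_of(obj, struct foo_gem_object, base);

		/* Publish the buffer's existing kernel virtual address */
		iosys_map_set_vaddr(map, bo->vaddr);

		return 0;
	}
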
     700             : 
     701             : /**
     702             :  * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
     703             :  * @obj: GEM object
     704             :  * @vma: Virtual address range
     705             :  *
     706             :  * This function sets up a userspace mapping for PRIME exported buffers using
     707             :  * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
     708             :  * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
     709             :  * called to set up the mapping.
     710             :  *
     711             :  * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
     712             :  */
     713           0 : int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
     714             : {
     715             :         struct drm_file *priv;
     716             :         struct file *fil;
     717             :         int ret;
     718             : 
     719             :         /* Add the fake offset */
     720           0 :         vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
     721             : 
     722           0 :         if (obj->funcs && obj->funcs->mmap) {
     723           0 :                 vma->vm_ops = obj->funcs->vm_ops;
     724             : 
     725           0 :                 drm_gem_object_get(obj);
     726           0 :                 ret = obj->funcs->mmap(obj, vma);
     727           0 :                 if (ret) {
     728             :                         drm_gem_object_put(obj);
     729             :                         return ret;
     730             :                 }
     731           0 :                 vma->vm_private_data = obj;
     732           0 :                 return 0;
     733             :         }
     734             : 
     735           0 :         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
     736           0 :         fil = kzalloc(sizeof(*fil), GFP_KERNEL);
     737           0 :         if (!priv || !fil) {
     738             :                 ret = -ENOMEM;
     739             :                 goto out;
     740             :         }
     741             : 
     742             :         /* Used by drm_gem_mmap() to lookup the GEM object */
     743           0 :         priv->minor = obj->dev->primary;
     744           0 :         fil->private_data = priv;
     745             : 
     746           0 :         ret = drm_vma_node_allow(&obj->vma_node, priv);
     747           0 :         if (ret)
     748             :                 goto out;
     749             : 
     750           0 :         ret = obj->dev->driver->fops->mmap(fil, vma);
     751             : 
     752           0 :         drm_vma_node_revoke(&obj->vma_node, priv);
     753             : out:
     754           0 :         kfree(priv);
     755           0 :         kfree(fil);
     756             : 
     757           0 :         return ret;
     758             : }
     759             : EXPORT_SYMBOL(drm_gem_prime_mmap);
     760             : 
     761             : /**
     762             :  * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
     763             :  * @dma_buf: buffer to be mapped
     764             :  * @vma: virtual address range
     765             :  *
     766             :  * Provides memory mapping for the buffer. This can be used as the
     767             :  * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
     768             :  * which should be set to drm_gem_prime_mmap().
     769             :  *
     770             :  * FIXME: There's really no point to this wrapper, drivers which need anything
     771             :  * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
     772             :  *
     773             :  * Returns 0 on success or a negative error code on failure.
     774             :  */
     775           0 : int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
     776             : {
     777           0 :         struct drm_gem_object *obj = dma_buf->priv;
     778           0 :         struct drm_device *dev = obj->dev;
     779             : 
     780           0 :         if (!dev->driver->gem_prime_mmap)
     781             :                 return -ENOSYS;
     782             : 
     783           0 :         return dev->driver->gem_prime_mmap(obj, vma);
     784             : }
     785             : EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
     786             : 
     787             : static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
     788             :         .cache_sgt_mapping = true,
     789             :         .attach = drm_gem_map_attach,
     790             :         .detach = drm_gem_map_detach,
     791             :         .map_dma_buf = drm_gem_map_dma_buf,
     792             :         .unmap_dma_buf = drm_gem_unmap_dma_buf,
     793             :         .release = drm_gem_dmabuf_release,
     794             :         .mmap = drm_gem_dmabuf_mmap,
     795             :         .vmap = drm_gem_dmabuf_vmap,
     796             :         .vunmap = drm_gem_dmabuf_vunmap,
     797             : };
     798             : 
     799             : /**
     800             :  * drm_prime_pages_to_sg - converts a page array into an sg list
     801             :  * @dev: DRM device
     802             :  * @pages: pointer to the array of page pointers to convert
     803             :  * @nr_pages: length of the page vector
     804             :  *
      805             :  * This helper creates an sg table object from a set of pages; the driver
      806             :  * is responsible for mapping the pages into the importer's address space
      807             :  * for use with dma_buf itself.
     808             :  *
     809             :  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
     810             :  */
     811           0 : struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
     812             :                                        struct page **pages, unsigned int nr_pages)
     813             : {
     814             :         struct sg_table *sg;
     815           0 :         size_t max_segment = 0;
     816             :         int err;
     817             : 
     818           0 :         sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
     819           0 :         if (!sg)
     820             :                 return ERR_PTR(-ENOMEM);
     821             : 
     822           0 :         if (dev)
     823           0 :                 max_segment = dma_max_mapping_size(dev->dev);
     824           0 :         if (max_segment == 0)
     825           0 :                 max_segment = UINT_MAX;
     826           0 :         err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
     827           0 :                                                 nr_pages << PAGE_SHIFT,
     828             :                                                 max_segment, GFP_KERNEL);
     829           0 :         if (err) {
     830           0 :                 kfree(sg);
     831           0 :                 sg = ERR_PTR(err);
     832             :         }
     833             :         return sg;
     834             : }
     835             : EXPORT_SYMBOL(drm_prime_pages_to_sg);
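
A typical &drm_gem_object_funcs.get_sg_table implementation built on this helper could look like the following sketch (struct foo_gem_object and its pages array are assumptions):

	static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
	{
		struct foo_gem_object *bo = container_of(obj, struct foo_gem_object, base);

		/* obj->size is page aligned, so this covers the whole buffer */
		return drm_prime_pages_to_sg(obj->dev, bo->pages,
					     obj->size >> PAGE_SHIFT);
	}
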
     836             : 
     837             : /**
     838             :  * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
     839             :  * @sgt: sg_table describing the buffer to check
     840             :  *
     841             :  * This helper calculates the contiguous size in the DMA address space
      842             :  * of the buffer described by the provided sg_table.
     843             :  *
     844             :  * This is useful for implementing
      845             :  * &drm_driver.gem_prime_import_sg_table.
     846             :  */
     847           0 : unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
     848             : {
     849           0 :         dma_addr_t expected = sg_dma_address(sgt->sgl);
     850             :         struct scatterlist *sg;
     851           0 :         unsigned long size = 0;
     852             :         int i;
     853             : 
     854           0 :         for_each_sgtable_dma_sg(sgt, sg, i) {
     855           0 :                 unsigned int len = sg_dma_len(sg);
     856             : 
     857           0 :                 if (!len)
     858             :                         break;
     859           0 :                 if (sg_dma_address(sg) != expected)
     860             :                         break;
     861           0 :                 expected += len;
     862           0 :                 size += len;
     863             :         }
     864           0 :         return size;
     865             : }
     866             : EXPORT_SYMBOL(drm_prime_get_contiguous_size);
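
Drivers that can only handle physically contiguous imports typically use this helper to validate the sg_table in their &drm_driver.gem_prime_import_sg_table hook, roughly as in this sketch (foo_gem_wrap_dma_range is an assumed driver helper):

	static struct drm_gem_object *
	foo_gem_prime_import_sg_table(struct drm_device *dev,
				      struct dma_buf_attachment *attach,
				      struct sg_table *sgt)
	{
		/* Reject buffers that are not contiguous in the device's DMA space */
		if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
			return ERR_PTR(-EINVAL);

		return foo_gem_wrap_dma_range(dev, sg_dma_address(sgt->sgl),
					      attach->dmabuf->size);
	}
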
     867             : 
     868             : /**
     869             :  * drm_gem_prime_export - helper library implementation of the export callback
     870             :  * @obj: GEM object to export
     871             :  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
     872             :  *
      873             :  * This is the implementation of the &drm_gem_object_funcs.export function for GEM drivers
     874             :  * using the PRIME helpers. It is used as the default in
     875             :  * drm_gem_prime_handle_to_fd().
     876             :  */
     877           0 : struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
     878             :                                      int flags)
     879             : {
     880           0 :         struct drm_device *dev = obj->dev;
     881           0 :         struct dma_buf_export_info exp_info = {
     882             :                 .exp_name = KBUILD_MODNAME, /* white lie for debug */
     883           0 :                 .owner = dev->driver->fops->owner,
     884             :                 .ops = &drm_gem_prime_dmabuf_ops,
     885           0 :                 .size = obj->size,
     886             :                 .flags = flags,
     887             :                 .priv = obj,
     888           0 :                 .resv = obj->resv,
     889             :         };
     890             : 
     891           0 :         return drm_gem_dmabuf_export(dev, &exp_info);
     892             : }
     893             : EXPORT_SYMBOL(drm_gem_prime_export);
     894             : 
     895             : /**
     896             :  * drm_gem_prime_import_dev - core implementation of the import callback
     897             :  * @dev: drm_device to import into
     898             :  * @dma_buf: dma-buf object to import
     899             :  * @attach_dev: struct device to dma_buf attach
     900             :  *
     901             :  * This is the core of drm_gem_prime_import(). It's designed to be called by
      902             :  * drivers that want to use a different device structure than &drm_device.dev for
     903             :  * attaching via dma_buf. This function calls
     904             :  * &drm_driver.gem_prime_import_sg_table internally.
     905             :  *
     906             :  * Drivers must arrange to call drm_prime_gem_destroy() from their
     907             :  * &drm_gem_object_funcs.free hook when using this function.
     908             :  */
     909           0 : struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
     910             :                                             struct dma_buf *dma_buf,
     911             :                                             struct device *attach_dev)
     912             : {
     913             :         struct dma_buf_attachment *attach;
     914             :         struct sg_table *sgt;
     915             :         struct drm_gem_object *obj;
     916             :         int ret;
     917             : 
     918           0 :         if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
     919           0 :                 obj = dma_buf->priv;
     920           0 :                 if (obj->dev == dev) {
     921             :                         /*
      922             :                          * Importing dmabuf exported from our own gem increases
     923             :                          * refcount on gem itself instead of f_count of dmabuf.
     924             :                          */
     925           0 :                         drm_gem_object_get(obj);
     926           0 :                         return obj;
     927             :                 }
     928             :         }
     929             : 
     930           0 :         if (!dev->driver->gem_prime_import_sg_table)
     931             :                 return ERR_PTR(-EINVAL);
     932             : 
     933           0 :         attach = dma_buf_attach(dma_buf, attach_dev);
     934           0 :         if (IS_ERR(attach))
     935             :                 return ERR_CAST(attach);
     936             : 
     937           0 :         get_dma_buf(dma_buf);
     938             : 
     939           0 :         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
     940           0 :         if (IS_ERR(sgt)) {
     941           0 :                 ret = PTR_ERR(sgt);
     942           0 :                 goto fail_detach;
     943             :         }
     944             : 
     945           0 :         obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
     946           0 :         if (IS_ERR(obj)) {
     947           0 :                 ret = PTR_ERR(obj);
     948             :                 goto fail_unmap;
     949             :         }
     950             : 
     951           0 :         obj->import_attach = attach;
     952           0 :         obj->resv = dma_buf->resv;
     953             : 
     954           0 :         return obj;
     955             : 
     956             : fail_unmap:
     957           0 :         dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
     958             : fail_detach:
     959           0 :         dma_buf_detach(dma_buf, attach);
     960           0 :         dma_buf_put(dma_buf);
     961             : 
     962           0 :         return ERR_PTR(ret);
     963             : }
     964             : EXPORT_SYMBOL(drm_gem_prime_import_dev);
     965             : 
     966             : /**
     967             :  * drm_gem_prime_import - helper library implementation of the import callback
     968             :  * @dev: drm_device to import into
     969             :  * @dma_buf: dma-buf object to import
     970             :  *
     971             :  * This is the implementation of the gem_prime_import functions for GEM drivers
     972             :  * using the PRIME helpers. Drivers can use this as their
     973             :  * &drm_driver.gem_prime_import implementation. It is used as the default
     974             :  * implementation in drm_gem_prime_fd_to_handle().
     975             :  *
     976             :  * Drivers must arrange to call drm_prime_gem_destroy() from their
     977             :  * &drm_gem_object_funcs.free hook when using this function.
     978             :  */
     979           0 : struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
     980             :                                             struct dma_buf *dma_buf)
     981             : {
     982           0 :         return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
     983             : }
     984             : EXPORT_SYMBOL(drm_gem_prime_import);
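
Drivers that do DMA through a device other than &drm_device.dev (common for componentized SoC drivers) can wrap drm_gem_prime_import_dev() as their &drm_driver.gem_prime_import hook, for example in this sketch (the private dma_dev pointer is an assumption):

	static struct drm_gem_object *
	foo_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
	{
		struct foo_drm_private *priv = dev->dev_private;

		/* Attach via the DMA-capable component device, not drm_device.dev */
		return drm_gem_prime_import_dev(dev, dma_buf, priv->dma_dev);
	}
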
     985             : 
     986             : /**
     987             :  * drm_prime_sg_to_page_array - convert an sg table into a page array
     988             :  * @sgt: scatter-gather table to convert
     989             :  * @pages: array of page pointers to store the pages in
     990             :  * @max_entries: size of the passed-in array
     991             :  *
     992             :  * Exports an sg table into an array of pages.
     993             :  *
      994             :  * This function is deprecated and its use is strongly discouraged.
     995             :  * The page array is only useful for page faults and those can corrupt fields
     996             :  * in the struct page if they are not handled by the exporting driver.
     997             :  */
     998           0 : int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
     999             :                                             struct page **pages,
    1000             :                                             int max_entries)
    1001             : {
    1002             :         struct sg_page_iter page_iter;
    1003           0 :         struct page **p = pages;
    1004             : 
    1005           0 :         for_each_sgtable_page(sgt, &page_iter, 0) {
    1006           0 :                 if (WARN_ON(p - pages >= max_entries))
    1007             :                         return -1;
    1008           0 :                 *p++ = sg_page_iter_page(&page_iter);
    1009             :         }
    1010             :         return 0;
    1011             : }
    1012             : EXPORT_SYMBOL(drm_prime_sg_to_page_array);
    1013             : 
    1014             : /**
    1015             :  * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
    1016             :  * @sgt: scatter-gather table to convert
    1017             :  * @addrs: array to store the dma bus address of each page
     1018             :  * @max_entries: size of the passed-in array
    1019             :  *
    1020             :  * Exports an sg table into an array of addresses.
    1021             :  *
    1022             :  * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
    1023             :  * implementation.
    1024             :  */
    1025           0 : int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
    1026             :                                    int max_entries)
    1027             : {
    1028             :         struct sg_dma_page_iter dma_iter;
    1029           0 :         dma_addr_t *a = addrs;
    1030             : 
    1031           0 :         for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
    1032           0 :                 if (WARN_ON(a - addrs >= max_entries))
    1033             :                         return -1;
    1034           0 :                 *a++ = sg_page_iter_dma_address(&dma_iter);
    1035             :         }
    1036             :         return 0;
    1037             : }
    1038             : EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
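
A &drm_driver.gem_prime_import_sg_table implementation that needs a flat per-page address table might use this helper as in the following sketch (the foo_gem_object layout is an assumption):

	static int foo_gem_import_dma_addrs(struct foo_gem_object *bo,
					    struct sg_table *sgt)
	{
		unsigned int npages = bo->base.size >> PAGE_SHIFT;

		bo->dma_addrs = kmalloc_array(npages, sizeof(*bo->dma_addrs),
					      GFP_KERNEL);
		if (!bo->dma_addrs)
			return -ENOMEM;

		/* Flatten the sg_table into one dma_addr_t per page */
		if (drm_prime_sg_to_dma_addr_array(sgt, bo->dma_addrs, npages) < 0) {
			kfree(bo->dma_addrs);
			return -EINVAL;
		}

		return 0;
	}
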
    1039             : 
    1040             : /**
    1041             :  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
    1042             :  * @obj: GEM object which was created from a dma-buf
    1043             :  * @sg: the sg-table which was pinned at import time
    1044             :  *
     1045             :  * This is the cleanup function which GEM drivers need to call when they use
    1046             :  * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
    1047             :  */
    1048           0 : void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
    1049             : {
    1050             :         struct dma_buf_attachment *attach;
    1051             :         struct dma_buf *dma_buf;
    1052             : 
    1053           0 :         attach = obj->import_attach;
    1054           0 :         if (sg)
    1055           0 :                 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
    1056           0 :         dma_buf = attach->dmabuf;
    1057           0 :         dma_buf_detach(attach->dmabuf, attach);
    1058             :         /* remove the reference */
    1059           0 :         dma_buf_put(dma_buf);
    1060           0 : }
    1061             : EXPORT_SYMBOL(drm_prime_gem_destroy);
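
A matching &drm_gem_object_funcs.free hook for a driver importing through the helpers above could look like this sketch (struct foo_gem_object and its cached sgt are assumptions):

	static void foo_gem_free_object(struct drm_gem_object *obj)
	{
		struct foo_gem_object *bo = container_of(obj, struct foo_gem_object, base);

		/* Unmap and detach the dma-buf attachment created at import time */
		if (obj->import_attach)
			drm_prime_gem_destroy(obj, bo->sgt);

		drm_gem_object_release(obj);
		kfree(bo);
	}
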

Generated by: LCOV version 1.14