LCOV - code coverage report
Current view: top level - block - blk-map.c
Test:         coverage.info
Date:         2022-12-09 01:23:36

                    Hit    Total   Coverage
  Lines:              0      256      0.0 %
  Functions:          0       16      0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Functions related to mapping data to requests
       4             :  */
       5             : #include <linux/kernel.h>
       6             : #include <linux/sched/task_stack.h>
       7             : #include <linux/module.h>
       8             : #include <linux/bio.h>
       9             : #include <linux/blkdev.h>
      10             : #include <linux/uio.h>
      11             : 
      12             : #include "blk.h"
      13             : 
      14             : struct bio_map_data {
      15             :         bool is_our_pages : 1;
      16             :         bool is_null_mapped : 1;
      17             :         struct iov_iter iter;
      18             :         struct iovec iov[];
      19             : };
      20             : 
      21           0 : static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
      22             :                                                gfp_t gfp_mask)
      23             : {
      24             :         struct bio_map_data *bmd;
      25             : 
      26           0 :         if (data->nr_segs > UIO_MAXIOV)
      27             :                 return NULL;
      28             : 
      29           0 :         bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
      30           0 :         if (!bmd)
      31             :                 return NULL;
      32           0 :         memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
      33           0 :         bmd->iter = *data;
      34           0 :         bmd->iter.iov = bmd->iov;
      35           0 :         return bmd;
      36             : }
      37             : 
      38             : /**
      39             :  * bio_copy_from_iter - copy all pages from iov_iter to bio
      40             :  * @bio: The &struct bio which describes the I/O as destination
      41             :  * @iter: iov_iter as source
      42             :  *
      43             :  * Copy all pages from iov_iter to bio.
      44             :  * Returns 0 on success, or error on failure.
      45             :  */
      46           0 : static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
      47             : {
      48             :         struct bio_vec *bvec;
      49             :         struct bvec_iter_all iter_all;
      50             : 
      51           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
      52             :                 ssize_t ret;
      53             : 
      54           0 :                 ret = copy_page_from_iter(bvec->bv_page,
      55           0 :                                           bvec->bv_offset,
      56           0 :                                           bvec->bv_len,
      57             :                                           iter);
      58             : 
      59           0 :                 if (!iov_iter_count(iter))
      60             :                         break;
      61             : 
      62           0 :                 if (ret < bvec->bv_len)
      63             :                         return -EFAULT;
      64             :         }
      65             : 
      66             :         return 0;
      67             : }
      68             : 
      69             : /**
      70             :  * bio_copy_to_iter - copy all pages from bio to iov_iter
      71             :  * @bio: The &struct bio which describes the I/O as source
      72             :  * @iter: iov_iter as destination
      73             :  *
      74             :  * Copy all pages from bio to iov_iter.
      75             :  * Returns 0 on success, or error on failure.
      76             :  */
      77           0 : static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
      78             : {
      79             :         struct bio_vec *bvec;
      80             :         struct bvec_iter_all iter_all;
      81             : 
      82           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
      83             :                 ssize_t ret;
      84             : 
      85           0 :                 ret = copy_page_to_iter(bvec->bv_page,
      86           0 :                                         bvec->bv_offset,
      87           0 :                                         bvec->bv_len,
      88             :                                         &iter);
      89             : 
      90           0 :                 if (!iov_iter_count(&iter))
      91             :                         break;
      92             : 
      93           0 :                 if (ret < bvec->bv_len)
      94             :                         return -EFAULT;
      95             :         }
      96             : 
      97             :         return 0;
      98             : }
      99             : 
     100             : /**
     101             :  *      bio_uncopy_user -       finish previously mapped bio
     102             :  *      @bio: bio being terminated
     103             :  *
     104             :  *      Free pages allocated from bio_copy_user_iov() and write back data
     105             :  *      to user space in case of a read.
     106             :  */
     107           0 : static int bio_uncopy_user(struct bio *bio)
     108             : {
     109           0 :         struct bio_map_data *bmd = bio->bi_private;
     110           0 :         int ret = 0;
     111             : 
     112           0 :         if (!bmd->is_null_mapped) {
     113             :                 /*
     114             :                  * if we're in a workqueue, the request is orphaned, so
     115             :                  * don't copy into a random user address space, just free
     116             :                  * and return -EINTR so user space doesn't expect any data.
     117             :                  */
     118           0 :                 if (!current->mm)
     119             :                         ret = -EINTR;
     120           0 :                 else if (bio_data_dir(bio) == READ)
     121           0 :                         ret = bio_copy_to_iter(bio, bmd->iter);
     122           0 :                 if (bmd->is_our_pages)
     123           0 :                         bio_free_pages(bio);
     124             :         }
     125           0 :         kfree(bmd);
     126           0 :         return ret;
     127             : }
     128             : 
     129           0 : static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
     130             :                 struct iov_iter *iter, gfp_t gfp_mask)
     131             : {
     132             :         struct bio_map_data *bmd;
     133             :         struct page *page;
     134             :         struct bio *bio;
     135           0 :         int i = 0, ret;
     136             :         int nr_pages;
     137           0 :         unsigned int len = iter->count;
     138           0 :         unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
     139             : 
     140           0 :         bmd = bio_alloc_map_data(iter, gfp_mask);
     141           0 :         if (!bmd)
     142             :                 return -ENOMEM;
     143             : 
     144             :         /*
     145             :          * We need to do a deep copy of the iov_iter including the iovecs.
     146             :          * The caller provided iov might point to an on-stack or otherwise
     147             :          * shortlived one.
     148             :          */
     149           0 :         bmd->is_our_pages = !map_data;
     150           0 :         bmd->is_null_mapped = (map_data && map_data->null_mapped);
     151             : 
     152           0 :         nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
     153             : 
     154           0 :         ret = -ENOMEM;
     155           0 :         bio = bio_kmalloc(gfp_mask, nr_pages);
     156           0 :         if (!bio)
     157             :                 goto out_bmd;
     158           0 :         bio->bi_opf |= req_op(rq);
     159             : 
     160           0 :         if (map_data) {
     161           0 :                 nr_pages = 1 << map_data->page_order;
     162           0 :                 i = map_data->offset / PAGE_SIZE;
     163             :         }
     164           0 :         while (len) {
     165           0 :                 unsigned int bytes = PAGE_SIZE;
     166             : 
     167           0 :                 bytes -= offset;
     168             : 
     169           0 :                 if (bytes > len)
     170           0 :                         bytes = len;
     171             : 
     172           0 :                 if (map_data) {
     173           0 :                         if (i == map_data->nr_entries * nr_pages) {
     174             :                                 ret = -ENOMEM;
     175             :                                 goto cleanup;
     176             :                         }
     177             : 
     178           0 :                         page = map_data->pages[i / nr_pages];
     179           0 :                         page += (i % nr_pages);
     180             : 
     181           0 :                         i++;
     182             :                 } else {
     183           0 :                         page = alloc_page(GFP_NOIO | gfp_mask);
     184           0 :                         if (!page) {
     185             :                                 ret = -ENOMEM;
     186             :                                 goto cleanup;
     187             :                         }
     188             :                 }
     189             : 
     190           0 :                 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
     191           0 :                         if (!map_data)
     192           0 :                                 __free_page(page);
     193             :                         break;
     194             :                 }
     195             : 
     196           0 :                 len -= bytes;
     197           0 :                 offset = 0;
     198             :         }
     199             : 
     200           0 :         if (map_data)
     201           0 :                 map_data->offset += bio->bi_iter.bi_size;
     202             : 
     203             :         /*
     204             :          * success
     205             :          */
     206           0 :         if ((iov_iter_rw(iter) == WRITE &&
     207           0 :              (!map_data || !map_data->null_mapped)) ||
     208           0 :             (map_data && map_data->from_user)) {
     209           0 :                 ret = bio_copy_from_iter(bio, iter);
     210           0 :                 if (ret)
     211             :                         goto cleanup;
     212             :         } else {
     213           0 :                 if (bmd->is_our_pages)
     214           0 :                         zero_fill_bio(bio);
     215           0 :                 iov_iter_advance(iter, bio->bi_iter.bi_size);
     216             :         }
     217             : 
     218           0 :         bio->bi_private = bmd;
     219             : 
     220           0 :         ret = blk_rq_append_bio(rq, bio);
     221           0 :         if (ret)
     222             :                 goto cleanup;
     223             :         return 0;
     224             : cleanup:
     225           0 :         if (!map_data)
     226           0 :                 bio_free_pages(bio);
     227           0 :         bio_put(bio);
     228             : out_bmd:
     229           0 :         kfree(bmd);
     230           0 :         return ret;
     231             : }
     232             : 
     233           0 : static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
     234             :                 gfp_t gfp_mask)
     235             : {
     236           0 :         unsigned int max_sectors = queue_max_hw_sectors(rq->q);
     237             :         struct bio *bio;
     238             :         int ret;
     239             :         int j;
     240             : 
     241           0 :         if (!iov_iter_count(iter))
     242             :                 return -EINVAL;
     243             : 
     244           0 :         bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
     245           0 :         if (!bio)
     246             :                 return -ENOMEM;
     247           0 :         bio->bi_opf |= req_op(rq);
     248             : 
     249           0 :         while (iov_iter_count(iter)) {
     250             :                 struct page **pages;
     251             :                 ssize_t bytes;
     252           0 :                 size_t offs, added = 0;
     253             :                 int npages;
     254             : 
     255           0 :                 bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
     256           0 :                 if (unlikely(bytes <= 0)) {
     257           0 :                         ret = bytes ? bytes : -EFAULT;
     258           0 :                         goto out_unmap;
     259             :                 }
     260             : 
     261           0 :                 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
     262             : 
     263           0 :                 if (unlikely(offs & queue_dma_alignment(rq->q))) {
     264             :                         ret = -EINVAL;
     265             :                         j = 0;
     266             :                 } else {
     267           0 :                         for (j = 0; j < npages; j++) {
     268           0 :                                 struct page *page = pages[j];
     269           0 :                                 unsigned int n = PAGE_SIZE - offs;
     270           0 :                                 bool same_page = false;
     271             : 
     272           0 :                                 if (n > bytes)
     273           0 :                                         n = bytes;
     274             : 
     275           0 :                                 if (!bio_add_hw_page(rq->q, bio, page, n, offs,
     276             :                                                      max_sectors, &same_page)) {
     277           0 :                                         if (same_page)
     278           0 :                                                 put_page(page);
     279           0 :                                         break;
     280             :                                 }
     281             : 
     282           0 :                                 added += n;
     283           0 :                                 bytes -= n;
     284           0 :                                 offs = 0;
     285             :                         }
     286           0 :                         iov_iter_advance(iter, added);
     287             :                 }
     288             :                 /*
     289             :                  * release the pages we didn't map into the bio, if any
     290             :                  */
     291           0 :                 while (j < npages)
     292           0 :                         put_page(pages[j++]);
     293           0 :                 kvfree(pages);
     294             :                 /* couldn't stuff something into bio? */
     295           0 :                 if (bytes)
     296             :                         break;
     297             :         }
     298             : 
     299           0 :         ret = blk_rq_append_bio(rq, bio);
     300           0 :         if (ret)
     301             :                 goto out_unmap;
     302             :         return 0;
     303             : 
     304             :  out_unmap:
     305           0 :         bio_release_pages(bio, false);
     306           0 :         bio_put(bio);
     307           0 :         return ret;
     308             : }
     309             : 
     310             : static void bio_invalidate_vmalloc_pages(struct bio *bio)
     311             : {
     312             : #ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
     313             :         if (bio->bi_private && !op_is_write(bio_op(bio))) {
     314             :                 unsigned long i, len = 0;
     315             : 
     316             :                 for (i = 0; i < bio->bi_vcnt; i++)
     317             :                         len += bio->bi_io_vec[i].bv_len;
     318             :                 invalidate_kernel_vmap_range(bio->bi_private, len);
     319             :         }
     320             : #endif
     321             : }
     322             : 
     323           0 : static void bio_map_kern_endio(struct bio *bio)
     324             : {
     325           0 :         bio_invalidate_vmalloc_pages(bio);
     326           0 :         bio_put(bio);
     327           0 : }
     328             : 
     329             : /**
     330             :  *      bio_map_kern    -       map kernel address into bio
     331             :  *      @q: the struct request_queue for the bio
     332             :  *      @data: pointer to buffer to map
     333             :  *      @len: length in bytes
     334             :  *      @gfp_mask: allocation flags for bio allocation
     335             :  *
     336             :  *      Map the kernel address into a bio suitable for io to a block
     337             :  *      device. Returns an error pointer in case of error.
     338             :  */
     339           0 : static struct bio *bio_map_kern(struct request_queue *q, void *data,
     340             :                 unsigned int len, gfp_t gfp_mask)
     341             : {
     342           0 :         unsigned long kaddr = (unsigned long)data;
     343           0 :         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
     344           0 :         unsigned long start = kaddr >> PAGE_SHIFT;
     345           0 :         const int nr_pages = end - start;
     346           0 :         bool is_vmalloc = is_vmalloc_addr(data);
     347             :         struct page *page;
     348             :         int offset, i;
     349             :         struct bio *bio;
     350             : 
     351           0 :         bio = bio_kmalloc(gfp_mask, nr_pages);
     352           0 :         if (!bio)
     353             :                 return ERR_PTR(-ENOMEM);
     354             : 
     355           0 :         if (is_vmalloc) {
     356           0 :                 flush_kernel_vmap_range(data, len);
     357           0 :                 bio->bi_private = data;
     358             :         }
     359             : 
     360           0 :         offset = offset_in_page(kaddr);
     361           0 :         for (i = 0; i < nr_pages; i++) {
     362           0 :                 unsigned int bytes = PAGE_SIZE - offset;
     363             : 
     364           0 :                 if (len <= 0)
     365             :                         break;
     366             : 
     367           0 :                 if (bytes > len)
     368           0 :                         bytes = len;
     369             : 
     370           0 :                 if (!is_vmalloc)
     371           0 :                         page = virt_to_page(data);
     372             :                 else
     373           0 :                         page = vmalloc_to_page(data);
     374           0 :                 if (bio_add_pc_page(q, bio, page, bytes,
     375             :                                     offset) < bytes) {
     376             :                         /* we don't support partial mappings */
     377           0 :                         bio_put(bio);
     378           0 :                         return ERR_PTR(-EINVAL);
     379             :                 }
     380             : 
     381           0 :                 data += bytes;
     382           0 :                 len -= bytes;
     383           0 :                 offset = 0;
     384             :         }
     385             : 
     386           0 :         bio->bi_end_io = bio_map_kern_endio;
     387           0 :         return bio;
     388             : }
     389             : 
     390           0 : static void bio_copy_kern_endio(struct bio *bio)
     391             : {
     392           0 :         bio_free_pages(bio);
     393           0 :         bio_put(bio);
     394           0 : }
     395             : 
     396           0 : static void bio_copy_kern_endio_read(struct bio *bio)
     397             : {
     398           0 :         char *p = bio->bi_private;
     399             :         struct bio_vec *bvec;
     400             :         struct bvec_iter_all iter_all;
     401             : 
     402           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
     403           0 :                 memcpy_from_bvec(p, bvec);
     404           0 :                 p += bvec->bv_len;
     405             :         }
     406             : 
     407           0 :         bio_copy_kern_endio(bio);
     408           0 : }
     409             : 
     410             : /**
     411             :  *      bio_copy_kern   -       copy kernel address into bio
     412             :  *      @q: the struct request_queue for the bio
     413             :  *      @data: pointer to buffer to copy
     414             :  *      @len: length in bytes
     415             :  *      @gfp_mask: allocation flags for bio and page allocation
     416             :  *      @reading: data direction is READ
     417             :  *
     418             :  *      copy the kernel address into a bio suitable for io to a block
     419             :  *      device. Returns an error pointer in case of error.
     420             :  */
     421           0 : static struct bio *bio_copy_kern(struct request_queue *q, void *data,
     422             :                 unsigned int len, gfp_t gfp_mask, int reading)
     423             : {
     424           0 :         unsigned long kaddr = (unsigned long)data;
     425           0 :         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
     426           0 :         unsigned long start = kaddr >> PAGE_SHIFT;
     427             :         struct bio *bio;
     428           0 :         void *p = data;
     429           0 :         int nr_pages = 0;
     430             : 
     431             :         /*
     432             :          * Overflow, abort
     433             :          */
     434           0 :         if (end < start)
     435             :                 return ERR_PTR(-EINVAL);
     436             : 
     437           0 :         nr_pages = end - start;
     438           0 :         bio = bio_kmalloc(gfp_mask, nr_pages);
     439           0 :         if (!bio)
     440             :                 return ERR_PTR(-ENOMEM);
     441             : 
     442           0 :         while (len) {
     443             :                 struct page *page;
     444           0 :                 unsigned int bytes = PAGE_SIZE;
     445             : 
     446           0 :                 if (bytes > len)
     447           0 :                         bytes = len;
     448             : 
     449           0 :                 page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
     450           0 :                 if (!page)
     451             :                         goto cleanup;
     452             : 
     453           0 :                 if (!reading)
     454           0 :                         memcpy(page_address(page), p, bytes);
     455             : 
     456           0 :                 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
     457             :                         break;
     458             : 
     459           0 :                 len -= bytes;
     460           0 :                 p += bytes;
     461             :         }
     462             : 
     463           0 :         if (reading) {
     464           0 :                 bio->bi_end_io = bio_copy_kern_endio_read;
     465           0 :                 bio->bi_private = data;
     466             :         } else {
     467           0 :                 bio->bi_end_io = bio_copy_kern_endio;
     468             :         }
     469             : 
     470             :         return bio;
     471             : 
     472             : cleanup:
     473           0 :         bio_free_pages(bio);
     474           0 :         bio_put(bio);
     475           0 :         return ERR_PTR(-ENOMEM);
     476             : }
     477             : 
     478             : /*
     479             :  * Append a bio to a passthrough request.  Only works if the bio can be merged
     480             :  * into the request based on the driver constraints.
     481             :  */
     482           0 : int blk_rq_append_bio(struct request *rq, struct bio *bio)
     483             : {
     484             :         struct bvec_iter iter;
     485             :         struct bio_vec bv;
     486           0 :         unsigned int nr_segs = 0;
     487             : 
     488           0 :         bio_for_each_bvec(bv, bio, iter)
     489           0 :                 nr_segs++;
     490             : 
     491           0 :         if (!rq->bio) {
     492             :                 blk_rq_bio_prep(rq, bio, nr_segs);
     493             :         } else {
     494           0 :                 if (!ll_back_merge_fn(rq, bio, nr_segs))
     495             :                         return -EINVAL;
     496           0 :                 rq->biotail->bi_next = bio;
     497           0 :                 rq->biotail = bio;
     498           0 :                 rq->__data_len += (bio)->bi_iter.bi_size;
     499           0 :                 bio_crypt_free_ctx(bio);
     500             :         }
     501             : 
     502             :         return 0;
     503             : }
     504             : EXPORT_SYMBOL(blk_rq_append_bio);
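
blk_rq_append_bio() is the primitive that the mapping helpers in this file funnel into: it either seeds an empty passthrough request with the bio or back-merges the bio subject to the driver's segment limits, and it does not consume the bio on failure. Below is a minimal sketch of calling it from a hypothetical driver helper; the helper name and the surrounding setup are assumptions, not part of blk-map.c.

        /* Hypothetical helper: attach a caller-built bio to a passthrough request. */
        static int my_attach_bio(struct request *rq, struct bio *bio)
        {
                int ret;

                /* give the bio the same operation as the request before merging */
                bio->bi_opf &= ~REQ_OP_MASK;
                bio->bi_opf |= req_op(rq);

                ret = blk_rq_append_bio(rq, bio);
                if (ret)
                        bio_put(bio);   /* the bio is not consumed on failure */
                return ret;
        }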
     505             : 
     506             : /**
     507             :  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
     508             :  * @q:          request queue where request should be inserted
     509             :  * @rq:         request to map data to
     510             :  * @map_data:   pointer to the rq_map_data holding pages (if necessary)
     511             :  * @iter:       iovec iterator
     512             :  * @gfp_mask:   memory allocation flags
     513             :  *
     514             :  * Description:
     515             :  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
     516             :  *    a kernel bounce buffer is used.
     517             :  *
     518             :  *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
     519             :  *    still in process context.
     520             :  */
     521           0 : int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
     522             :                         struct rq_map_data *map_data,
     523             :                         const struct iov_iter *iter, gfp_t gfp_mask)
     524             : {
     525           0 :         bool copy = false;
     526           0 :         unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
     527           0 :         struct bio *bio = NULL;
     528             :         struct iov_iter i;
     529           0 :         int ret = -EINVAL;
     530             : 
     531           0 :         if (!iter_is_iovec(iter))
     532             :                 goto fail;
     533             : 
     534           0 :         if (map_data)
     535             :                 copy = true;
     536           0 :         else if (blk_queue_may_bounce(q))
     537             :                 copy = true;
     538           0 :         else if (iov_iter_alignment(iter) & align)
     539             :                 copy = true;
     540           0 :         else if (queue_virt_boundary(q))
     541           0 :                 copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
     542             : 
     543           0 :         i = *iter;
     544             :         do {
     545           0 :                 if (copy)
     546           0 :                         ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
     547             :                 else
     548           0 :                         ret = bio_map_user_iov(rq, &i, gfp_mask);
     549           0 :                 if (ret)
     550             :                         goto unmap_rq;
     551           0 :                 if (!bio)
     552           0 :                         bio = rq->bio;
     553           0 :         } while (iov_iter_count(&i));
     554             : 
     555             :         return 0;
     556             : 
     557             : unmap_rq:
     558           0 :         blk_rq_unmap_user(bio);
     559             : fail:
     560           0 :         rq->bio = NULL;
     561           0 :         return ret;
     562             : }
     563             : EXPORT_SYMBOL(blk_rq_map_user_iov);
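
A typical ioctl-path call pattern for blk_rq_map_user_iov() is to import the user iovec into an iov_iter, map it onto an already-allocated passthrough request, and then free the imported iovec array. A reduced sketch, assuming rq and q were set up elsewhere (for example via blk_mq_alloc_request()); the helper name is hypothetical.

        /* Hypothetical: map a user iovec onto an existing passthrough request. */
        static int my_map_user_iov(struct request_queue *q, struct request *rq,
                                   const struct iovec __user *uvec, int nr_segs)
        {
                struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
                struct iov_iter iter;
                ssize_t ret;

                ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
                                   ARRAY_SIZE(iovstack), &iov, &iter);
                if (ret < 0)
                        return ret;

                ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
                kfree(iov);             /* import_iovec() may have allocated a copy */
                return ret;
        }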
     564             : 
     565           0 : int blk_rq_map_user(struct request_queue *q, struct request *rq,
     566             :                     struct rq_map_data *map_data, void __user *ubuf,
     567             :                     unsigned long len, gfp_t gfp_mask)
     568             : {
     569             :         struct iovec iov;
     570             :         struct iov_iter i;
     571           0 :         int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
     572             : 
     573           0 :         if (unlikely(ret < 0))
     574             :                 return ret;
     575             : 
     576           0 :         return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
     577             : }
     578             : EXPORT_SYMBOL(blk_rq_map_user);
     579             : 
     580             : /**
     581             :  * blk_rq_unmap_user - unmap a request with user data
     582             :  * @bio:               start of bio list
     583             :  *
     584             :  * Description:
     585             :  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
     586             :  *    supply the original rq->bio from the blk_rq_map_user() return, since
     587             :  *    the I/O completion may have changed rq->bio.
     588             :  */
     589           0 : int blk_rq_unmap_user(struct bio *bio)
     590             : {
     591             :         struct bio *next_bio;
     592           0 :         int ret = 0, ret2;
     593             : 
     594           0 :         while (bio) {
     595           0 :                 if (bio->bi_private) {
     596           0 :                         ret2 = bio_uncopy_user(bio);
     597           0 :                         if (ret2 && !ret)
     598           0 :                                 ret = ret2;
     599             :                 } else {
     600           0 :                         bio_release_pages(bio, bio_data_dir(bio) == READ);
     601             :                 }
     602             : 
     603           0 :                 next_bio = bio;
     604           0 :                 bio = bio->bi_next;
     605           0 :                 bio_put(next_bio);
     606             :         }
     607             : 
     608           0 :         return ret;
     609             : }
     610             : EXPORT_SYMBOL(blk_rq_unmap_user);
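
blk_rq_map_user() and blk_rq_unmap_user() are used as a pair, and, as the description above notes, the original rq->bio has to be saved before submission because completion may change it. A reduced sketch with the submission step elided; the helper name is hypothetical.

        /* Hypothetical single-buffer passthrough I/O: map, submit, unmap. */
        static int my_user_io(struct request_queue *q, struct request *rq,
                              void __user *ubuf, unsigned long len)
        {
                struct bio *bio;
                int ret;

                ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
                if (ret)
                        return ret;

                bio = rq->bio;  /* save it: completion may change rq->bio */

                /* ... submit the request and wait for it to complete ... */

                ret = blk_rq_unmap_user(bio);
                return ret;
        }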
     611             : 
     612             : /**
     613             :  * blk_rq_map_kern - map kernel data to a request, for passthrough requests
     614             :  * @q:          request queue where request should be inserted
     615             :  * @rq:         request to fill
     616             :  * @kbuf:       the kernel buffer
     617             :  * @len:        length of user data
     618             :  * @gfp_mask:   memory allocation flags
     619             :  *
     620             :  * Description:
     621             :  *    Data will be mapped directly if possible. Otherwise a bounce
     622             :  *    buffer is used. Can be called multiple times to append multiple
     623             :  *    buffers.
     624             :  */
     625           0 : int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
     626             :                     unsigned int len, gfp_t gfp_mask)
     627             : {
     628           0 :         int reading = rq_data_dir(rq) == READ;
     629           0 :         unsigned long addr = (unsigned long) kbuf;
     630             :         struct bio *bio;
     631             :         int ret;
     632             : 
     633           0 :         if (len > (queue_max_hw_sectors(q) << 9))
     634             :                 return -EINVAL;
     635           0 :         if (!len || !kbuf)
     636             :                 return -EINVAL;
     637             : 
     638           0 :         if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
     639           0 :             blk_queue_may_bounce(q))
     640           0 :                 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
     641             :         else
     642           0 :                 bio = bio_map_kern(q, kbuf, len, gfp_mask);
     643             : 
     644           0 :         if (IS_ERR(bio))
     645           0 :                 return PTR_ERR(bio);
     646             : 
     647           0 :         bio->bi_opf &= ~REQ_OP_MASK;
     648           0 :         bio->bi_opf |= req_op(rq);
     649             : 
     650           0 :         ret = blk_rq_append_bio(rq, bio);
     651           0 :         if (unlikely(ret))
     652           0 :                 bio_put(bio);
     653             :         return ret;
     654             : }
     655             : EXPORT_SYMBOL(blk_rq_map_kern);
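
blk_rq_map_kern() hides the choice between the zero-copy bio_map_kern() path and the bio_copy_kern() bounce path: misaligned, on-stack or bounce-requiring buffers are copied, everything else is mapped directly. A sketch of feeding it a heap buffer, with submission elided; the helper is hypothetical.

        /* Hypothetical: issue a passthrough request with a kernel data buffer. */
        static int my_kern_io(struct request_queue *q, struct request *rq,
                              unsigned int len)
        {
                void *buf;
                int ret;

                /* heap allocation: an on-stack buffer would force the copy path */
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
                if (ret) {
                        kfree(buf);
                        return ret;
                }

                /* ... submit the request and wait for it to complete ... */

                kfree(buf);     /* only safe once the request has completed */
                return 0;
        }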

Generated by: LCOV version 1.14