LCOV - code coverage report
Current view: top level - block - blk-mq.c (source / functions)
Test: coverage.info
Date: 2022-12-09 01:23:36
                    Hit      Total    Coverage
Lines:                8       1823       0.4 %
Functions:            1        168       0.6 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Block multiqueue core code
       4             :  *
       5             :  * Copyright (C) 2013-2014 Jens Axboe
       6             :  * Copyright (C) 2013-2014 Christoph Hellwig
       7             :  */
       8             : #include <linux/kernel.h>
       9             : #include <linux/module.h>
      10             : #include <linux/backing-dev.h>
      11             : #include <linux/bio.h>
      12             : #include <linux/blkdev.h>
      13             : #include <linux/blk-integrity.h>
      14             : #include <linux/kmemleak.h>
      15             : #include <linux/mm.h>
      16             : #include <linux/init.h>
      17             : #include <linux/slab.h>
      18             : #include <linux/workqueue.h>
      19             : #include <linux/smp.h>
      20             : #include <linux/interrupt.h>
      21             : #include <linux/llist.h>
      22             : #include <linux/cpu.h>
      23             : #include <linux/cache.h>
      24             : #include <linux/sched/sysctl.h>
      25             : #include <linux/sched/topology.h>
      26             : #include <linux/sched/signal.h>
      27             : #include <linux/delay.h>
      28             : #include <linux/crash_dump.h>
      29             : #include <linux/prefetch.h>
      30             : #include <linux/blk-crypto.h>
      31             : #include <linux/part_stat.h>
      32             : 
      33             : #include <trace/events/block.h>
      34             : 
      35             : #include <linux/blk-mq.h>
      36             : #include <linux/t10-pi.h>
      37             : #include "blk.h"
      38             : #include "blk-mq.h"
      39             : #include "blk-mq-debugfs.h"
      40             : #include "blk-mq-tag.h"
      41             : #include "blk-pm.h"
      42             : #include "blk-stat.h"
      43             : #include "blk-mq-sched.h"
      44             : #include "blk-rq-qos.h"
      45             : 
      46             : static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
      47             : 
      48             : static void blk_mq_poll_stats_start(struct request_queue *q);
      49             : static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
      50             : 
      51           0 : static int blk_mq_poll_stats_bkt(const struct request *rq)
      52             : {
      53             :         int ddir, sectors, bucket;
      54             : 
      55           0 :         ddir = rq_data_dir(rq);
      56           0 :         sectors = blk_rq_stats_sectors(rq);
      57             : 
      58           0 :         bucket = ddir + 2 * ilog2(sectors);
      59             : 
      60           0 :         if (bucket < 0)
      61             :                 return -1;
      62           0 :         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
      63           0 :                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
      64             : 
      65             :         return bucket;
      66             : }
      67             : 
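To make the bucketing above concrete, here is a minimal userspace sketch of the same arithmetic. It is illustrative only: it assumes BLK_MQ_POLL_STATS_BKTS is 16 (eight size buckets per I/O direction) and uses the GCC/Clang __builtin_clz() in place of the kernel's ilog2().

#include <assert.h>

#define NR_POLL_STATS_BKTS	16	/* assumed value of BLK_MQ_POLL_STATS_BKTS */

static int poll_stats_bkt(int ddir, int sectors)
{
	/* ilog2(sectors): index of the highest set bit */
	int bucket = ddir + 2 * (31 - __builtin_clz(sectors));

	if (bucket < 0)
		return -1;
	if (bucket >= NR_POLL_STATS_BKTS)
		return ddir + NR_POLL_STATS_BKTS - 2;
	return bucket;
}

int main(void)
{
	assert(poll_stats_bkt(0, 8) == 6);		/* 8-sector read  -> bucket 6 */
	assert(poll_stats_bkt(1, 8) == 7);		/* 8-sector write -> bucket 7 */
	assert(poll_stats_bkt(0, 1 << 20) == 14);	/* oversized read clamps to the last read bucket */
	return 0;
}
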
      68             : #define BLK_QC_T_SHIFT          16
      69             : #define BLK_QC_T_INTERNAL       (1U << 31)
      70             : 
      71             : static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
      72             :                 blk_qc_t qc)
      73             : {
      74           0 :         return xa_load(&q->hctx_table,
      75           0 :                         (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
      76             : }
      77             : 
      78             : static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
      79             :                 blk_qc_t qc)
      80             : {
      81           0 :         unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);
      82             : 
      83           0 :         if (qc & BLK_QC_T_INTERNAL)
      84           0 :                 return blk_mq_tag_to_rq(hctx->sched_tags, tag);
      85           0 :         return blk_mq_tag_to_rq(hctx->tags, tag);
      86             : }
      87             : 
      88             : static inline blk_qc_t blk_rq_to_qc(struct request *rq)
      89             : {
      90           0 :         return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
      91           0 :                 (rq->tag != -1 ?
      92           0 :                  rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
      93             : }
      94             : 
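A minimal userspace sketch of the blk_qc_t cookie layout used by blk_qc_to_hctx(), blk_qc_to_rq() and blk_rq_to_qc() above: the hardware queue index sits above bit 16, the tag occupies the low 16 bits, and bit 31 flags a scheduler (internal) tag. The constants mirror BLK_QC_T_SHIFT and BLK_QC_T_INTERNAL from this file; the example is not kernel code.

#include <assert.h>
#include <stdint.h>

#define QC_T_SHIFT	16		/* mirrors BLK_QC_T_SHIFT */
#define QC_T_INTERNAL	(1U << 31)	/* mirrors BLK_QC_T_INTERNAL */

int main(void)
{
	uint32_t qc = (2U << QC_T_SHIFT) | 5;	/* hctx #2, driver tag 5 */

	assert(((qc & ~QC_T_INTERNAL) >> QC_T_SHIFT) == 2);	/* -> hctx index */
	assert((qc & ((1U << QC_T_SHIFT) - 1)) == 5);		/* -> tag */
	assert(!(qc & QC_T_INTERNAL));	/* driver tag, not a sched_tags entry */
	return 0;
}
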
      95             : /*
      96             :  * Check if any of the ctx, dispatch list or elevator
      97             :  * have pending work in this hardware queue.
      98             :  */
      99           0 : static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
     100             : {
     101           0 :         return !list_empty_careful(&hctx->dispatch) ||
     102           0 :                 sbitmap_any_bit_set(&hctx->ctx_map) ||
     103           0 :                         blk_mq_sched_has_work(hctx);
     104             : }
     105             : 
     106             : /*
     107             :  * Mark this ctx as having pending work in this hardware queue
     108             :  */
     109           0 : static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
     110             :                                      struct blk_mq_ctx *ctx)
     111             : {
     112           0 :         const int bit = ctx->index_hw[hctx->type];
     113             : 
     114           0 :         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
     115           0 :                 sbitmap_set_bit(&hctx->ctx_map, bit);
     116           0 : }
     117             : 
     118             : static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
     119             :                                       struct blk_mq_ctx *ctx)
     120             : {
     121           0 :         const int bit = ctx->index_hw[hctx->type];
     122             : 
     123           0 :         sbitmap_clear_bit(&hctx->ctx_map, bit);
     124             : }
     125             : 
     126             : struct mq_inflight {
     127             :         struct block_device *part;
     128             :         unsigned int inflight[2];
     129             : };
     130             : 
     131           0 : static bool blk_mq_check_inflight(struct request *rq, void *priv,
     132             :                                   bool reserved)
     133             : {
     134           0 :         struct mq_inflight *mi = priv;
     135             : 
     136           0 :         if ((!mi->part->bd_partno || rq->part == mi->part) &&
     137           0 :             blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
     138           0 :                 mi->inflight[rq_data_dir(rq)]++;
     139             : 
     140           0 :         return true;
     141             : }
     142             : 
     143           0 : unsigned int blk_mq_in_flight(struct request_queue *q,
     144             :                 struct block_device *part)
     145             : {
     146           0 :         struct mq_inflight mi = { .part = part };
     147             : 
     148           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
     149             : 
     150           0 :         return mi.inflight[0] + mi.inflight[1];
     151             : }
     152             : 
     153           0 : void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
     154             :                 unsigned int inflight[2])
     155             : {
     156           0 :         struct mq_inflight mi = { .part = part };
     157             : 
     158           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
     159           0 :         inflight[0] = mi.inflight[0];
     160           0 :         inflight[1] = mi.inflight[1];
     161           0 : }
     162             : 
     163           0 : void blk_freeze_queue_start(struct request_queue *q)
     164             : {
     165           0 :         mutex_lock(&q->mq_freeze_lock);
     166           0 :         if (++q->mq_freeze_depth == 1) {
     167           0 :                 percpu_ref_kill(&q->q_usage_counter);
     168           0 :                 mutex_unlock(&q->mq_freeze_lock);
     169           0 :                 if (queue_is_mq(q))
     170           0 :                         blk_mq_run_hw_queues(q, false);
     171             :         } else {
     172           0 :                 mutex_unlock(&q->mq_freeze_lock);
     173             :         }
     174           0 : }
     175             : EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
     176             : 
     177           0 : void blk_mq_freeze_queue_wait(struct request_queue *q)
     178             : {
     179           0 :         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
     180           0 : }
     181             : EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
     182             : 
     183           0 : int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
     184             :                                      unsigned long timeout)
     185             : {
     186           0 :         return wait_event_timeout(q->mq_freeze_wq,
     187             :                                         percpu_ref_is_zero(&q->q_usage_counter),
     188             :                                         timeout);
     189             : }
     190             : EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
     191             : 
     192             : /*
     193             :  * Guarantee no request is in use, so we can change any data structure of
     194             :  * the queue afterward.
     195             :  */
     196           0 : void blk_freeze_queue(struct request_queue *q)
     197             : {
     198             :         /*
     199             :          * In the !blk_mq case we are only calling this to kill the
     200             :          * q_usage_counter, otherwise this increases the freeze depth
     201             :          * and waits for it to return to zero.  For this reason there is
     202             :          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
     203             :          * exported to drivers as the only user for unfreeze is blk_mq.
     204             :          */
     205           0 :         blk_freeze_queue_start(q);
     206           0 :         blk_mq_freeze_queue_wait(q);
     207           0 : }
     208             : 
     209           0 : void blk_mq_freeze_queue(struct request_queue *q)
     210             : {
     211             :         /*
     212             :          * ...just an alias to keep freeze and unfreeze actions balanced
     213             :          * in the blk_mq_* namespace
     214             :          */
     215           0 :         blk_freeze_queue(q);
     216           0 : }
     217             : EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
     218             : 
     219           0 : void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
     220             : {
     221           0 :         mutex_lock(&q->mq_freeze_lock);
     222           0 :         if (force_atomic)
     223           0 :                 q->q_usage_counter.data->force_atomic = true;
     224           0 :         q->mq_freeze_depth--;
     225           0 :         WARN_ON_ONCE(q->mq_freeze_depth < 0);
     226           0 :         if (!q->mq_freeze_depth) {
     227           0 :                 percpu_ref_resurrect(&q->q_usage_counter);
     228           0 :                 wake_up_all(&q->mq_freeze_wq);
     229             :         }
     230           0 :         mutex_unlock(&q->mq_freeze_lock);
     231           0 : }
     232             : 
     233           0 : void blk_mq_unfreeze_queue(struct request_queue *q)
     234             : {
     235           0 :         __blk_mq_unfreeze_queue(q, false);
     236           0 : }
     237             : EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
     238             : 
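A minimal kernel-context sketch of how the freeze API exported above is typically paired: freezing drains q_usage_counter so nothing is in flight while queue state changes, and unfreezing resurrects the counter and wakes waiters. update_limits() is a hypothetical placeholder for the driver-specific step, not a real helper.

static void example_reconfigure_queue(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* kill q_usage_counter and wait for it to drain */
	update_limits(q);		/* hypothetical: safe, no requests are in flight */
	blk_mq_unfreeze_queue(q);	/* resurrect the counter, wake blk_queue_enter() waiters */
}
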
     239             : /*
     240             :  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
     241             :  * mpt3sas driver such that this function can be removed.
     242             :  */
     243           0 : void blk_mq_quiesce_queue_nowait(struct request_queue *q)
     244             : {
     245             :         unsigned long flags;
     246             : 
     247           0 :         spin_lock_irqsave(&q->queue_lock, flags);
     248           0 :         if (!q->quiesce_depth++)
     249           0 :                 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
     250           0 :         spin_unlock_irqrestore(&q->queue_lock, flags);
     251           0 : }
     252             : EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
     253             : 
     254             : /**
     255             :  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
     256             :  * @q: request queue.
     257             :  *
      258             :  * Note: it is the driver's responsibility to make sure that quiesce has
     259             :  * been started.
     260             :  */
     261           0 : void blk_mq_wait_quiesce_done(struct request_queue *q)
     262             : {
     263           0 :         if (blk_queue_has_srcu(q))
     264           0 :                 synchronize_srcu(q->srcu);
     265             :         else
     266           0 :                 synchronize_rcu();
     267           0 : }
     268             : EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
     269             : 
     270             : /**
     271             :  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
     272             :  * @q: request queue.
     273             :  *
      274             :  * Note: this function does not prevent the struct request end_io()
      275             :  * callback from being invoked. Once this function returns, we make
     276             :  * sure no dispatch can happen until the queue is unquiesced via
     277             :  * blk_mq_unquiesce_queue().
     278             :  */
     279           0 : void blk_mq_quiesce_queue(struct request_queue *q)
     280             : {
     281           0 :         blk_mq_quiesce_queue_nowait(q);
     282           0 :         blk_mq_wait_quiesce_done(q);
     283           0 : }
     284             : EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
     285             : 
     286             : /*
     287             :  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
     288             :  * @q: request queue.
     289             :  *
      290             :  * This function restores the queue to the state it was in before it
      291             :  * was quiesced by blk_mq_quiesce_queue().
     292             :  */
     293           0 : void blk_mq_unquiesce_queue(struct request_queue *q)
     294             : {
     295             :         unsigned long flags;
     296           0 :         bool run_queue = false;
     297             : 
     298           0 :         spin_lock_irqsave(&q->queue_lock, flags);
     299           0 :         if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
     300             :                 ;
     301           0 :         } else if (!--q->quiesce_depth) {
     302           0 :                 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
     303           0 :                 run_queue = true;
     304             :         }
     305           0 :         spin_unlock_irqrestore(&q->queue_lock, flags);
     306             : 
     307             :         /* dispatch requests which are inserted during quiescing */
     308           0 :         if (run_queue)
     309           0 :                 blk_mq_run_hw_queues(q, true);
     310           0 : }
     311             : EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
     312             : 
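A minimal kernel-context sketch of the quiesce pairing above: unlike a freeze, quiescing only stops future dispatches (->queue_rq() calls) and does not wait for already-issued requests to complete. reset_controller() stands in for a hypothetical driver-specific recovery step.

static void example_recover(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);	/* no dispatch can happen after this returns */
	reset_controller(q->queuedata);	/* hypothetical recovery work */
	blk_mq_unquiesce_queue(q);	/* clear QUIESCED and re-run the hw queues */
}
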
     313           0 : void blk_mq_wake_waiters(struct request_queue *q)
     314             : {
     315             :         struct blk_mq_hw_ctx *hctx;
     316             :         unsigned long i;
     317             : 
     318           0 :         queue_for_each_hw_ctx(q, hctx, i)
     319           0 :                 if (blk_mq_hw_queue_mapped(hctx))
     320           0 :                         blk_mq_tag_wakeup_all(hctx->tags, true);
     321           0 : }
     322             : 
     323           0 : void blk_rq_init(struct request_queue *q, struct request *rq)
     324             : {
     325           0 :         memset(rq, 0, sizeof(*rq));
     326             : 
     327           0 :         INIT_LIST_HEAD(&rq->queuelist);
     328           0 :         rq->q = q;
     329           0 :         rq->__sector = (sector_t) -1;
     330           0 :         INIT_HLIST_NODE(&rq->hash);
     331           0 :         RB_CLEAR_NODE(&rq->rb_node);
     332           0 :         rq->tag = BLK_MQ_NO_TAG;
     333           0 :         rq->internal_tag = BLK_MQ_NO_TAG;
     334           0 :         rq->start_time_ns = ktime_get_ns();
     335           0 :         rq->part = NULL;
     336           0 :         blk_crypto_rq_set_defaults(rq);
     337           0 : }
     338             : EXPORT_SYMBOL(blk_rq_init);
     339             : 
     340           0 : static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
     341             :                 struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
     342             : {
     343           0 :         struct blk_mq_ctx *ctx = data->ctx;
     344           0 :         struct blk_mq_hw_ctx *hctx = data->hctx;
     345           0 :         struct request_queue *q = data->q;
     346           0 :         struct request *rq = tags->static_rqs[tag];
     347             : 
     348           0 :         rq->q = q;
     349           0 :         rq->mq_ctx = ctx;
     350           0 :         rq->mq_hctx = hctx;
     351           0 :         rq->cmd_flags = data->cmd_flags;
     352             : 
     353           0 :         if (data->flags & BLK_MQ_REQ_PM)
     354           0 :                 data->rq_flags |= RQF_PM;
     355           0 :         if (blk_queue_io_stat(q))
     356           0 :                 data->rq_flags |= RQF_IO_STAT;
     357           0 :         rq->rq_flags = data->rq_flags;
     358             : 
     359           0 :         if (!(data->rq_flags & RQF_ELV)) {
     360           0 :                 rq->tag = tag;
     361           0 :                 rq->internal_tag = BLK_MQ_NO_TAG;
     362             :         } else {
     363           0 :                 rq->tag = BLK_MQ_NO_TAG;
     364           0 :                 rq->internal_tag = tag;
     365             :         }
     366           0 :         rq->timeout = 0;
     367             : 
     368           0 :         if (blk_mq_need_time_stamp(rq))
     369           0 :                 rq->start_time_ns = ktime_get_ns();
     370             :         else
     371           0 :                 rq->start_time_ns = 0;
     372           0 :         rq->part = NULL;
     373             : #ifdef CONFIG_BLK_RQ_ALLOC_TIME
     374             :         rq->alloc_time_ns = alloc_time_ns;
     375             : #endif
     376           0 :         rq->io_start_time_ns = 0;
     377           0 :         rq->stats_sectors = 0;
     378           0 :         rq->nr_phys_segments = 0;
     379             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
     380             :         rq->nr_integrity_segments = 0;
     381             : #endif
     382           0 :         rq->end_io = NULL;
     383           0 :         rq->end_io_data = NULL;
     384             : 
     385           0 :         blk_crypto_rq_set_defaults(rq);
     386           0 :         INIT_LIST_HEAD(&rq->queuelist);
     387             :         /* tag was already set */
     388           0 :         WRITE_ONCE(rq->deadline, 0);
     389           0 :         req_ref_set(rq, 1);
     390             : 
     391           0 :         if (rq->rq_flags & RQF_ELV) {
     392           0 :                 struct elevator_queue *e = data->q->elevator;
     393             : 
     394           0 :                 INIT_HLIST_NODE(&rq->hash);
     395           0 :                 RB_CLEAR_NODE(&rq->rb_node);
     396             : 
     397           0 :                 if (!op_is_flush(data->cmd_flags) &&
     398           0 :                     e->type->ops.prepare_request) {
     399           0 :                         e->type->ops.prepare_request(rq);
     400           0 :                         rq->rq_flags |= RQF_ELVPRIV;
     401             :                 }
     402             :         }
     403             : 
     404           0 :         return rq;
     405             : }
     406             : 
     407             : static inline struct request *
     408           0 : __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
     409             :                 u64 alloc_time_ns)
     410             : {
     411             :         unsigned int tag, tag_offset;
     412             :         struct blk_mq_tags *tags;
     413             :         struct request *rq;
     414             :         unsigned long tag_mask;
     415           0 :         int i, nr = 0;
     416             : 
     417           0 :         tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
     418           0 :         if (unlikely(!tag_mask))
     419             :                 return NULL;
     420             : 
     421           0 :         tags = blk_mq_tags_from_data(data);
     422           0 :         for (i = 0; tag_mask; i++) {
     423           0 :                 if (!(tag_mask & (1UL << i)))
     424           0 :                         continue;
     425           0 :                 tag = tag_offset + i;
     426           0 :                 prefetch(tags->static_rqs[tag]);
     427           0 :                 tag_mask &= ~(1UL << i);
     428           0 :                 rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
     429           0 :                 rq_list_add(data->cached_rq, rq);
     430           0 :                 nr++;
     431             :         }
     432             :         /* caller already holds a reference, add for remainder */
     433           0 :         percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
     434           0 :         data->nr_tags -= nr;
     435             : 
     436           0 :         return rq_list_pop(data->cached_rq);
     437             : }
     438             : 
     439           0 : static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
     440             : {
     441           0 :         struct request_queue *q = data->q;
     442           0 :         u64 alloc_time_ns = 0;
     443             :         struct request *rq;
     444             :         unsigned int tag;
     445             : 
     446             :         /* alloc_time includes depth and tag waits */
     447             :         if (blk_queue_rq_alloc_time(q))
     448             :                 alloc_time_ns = ktime_get_ns();
     449             : 
     450           0 :         if (data->cmd_flags & REQ_NOWAIT)
     451           0 :                 data->flags |= BLK_MQ_REQ_NOWAIT;
     452             : 
     453           0 :         if (q->elevator) {
     454           0 :                 struct elevator_queue *e = q->elevator;
     455             : 
     456           0 :                 data->rq_flags |= RQF_ELV;
     457             : 
     458             :                 /*
     459             :                  * Flush/passthrough requests are special and go directly to the
     460             :                  * dispatch list. Don't include reserved tags in the
     461             :                  * limiting, as it isn't useful.
     462             :                  */
     463           0 :                 if (!op_is_flush(data->cmd_flags) &&
     464           0 :                     !blk_op_is_passthrough(data->cmd_flags) &&
     465           0 :                     e->type->ops.limit_depth &&
     466           0 :                     !(data->flags & BLK_MQ_REQ_RESERVED))
     467           0 :                         e->type->ops.limit_depth(data->cmd_flags, data);
     468             :         }
     469             : 
     470             : retry:
     471           0 :         data->ctx = blk_mq_get_ctx(q);
     472           0 :         data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
     473           0 :         if (!(data->rq_flags & RQF_ELV))
     474           0 :                 blk_mq_tag_busy(data->hctx);
     475             : 
     476             :         /*
     477             :          * Try batched alloc if we want more than 1 tag.
     478             :          */
     479           0 :         if (data->nr_tags > 1) {
     480           0 :                 rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
     481           0 :                 if (rq)
     482             :                         return rq;
     483           0 :                 data->nr_tags = 1;
     484             :         }
     485             : 
     486             :         /*
     487             :          * Waiting allocations only fail because of an inactive hctx.  In that
     488             :          * case just retry the hctx assignment and tag allocation as CPU hotplug
     489             :          * should have migrated us to an online CPU by now.
     490             :          */
     491           0 :         tag = blk_mq_get_tag(data);
     492           0 :         if (tag == BLK_MQ_NO_TAG) {
     493           0 :                 if (data->flags & BLK_MQ_REQ_NOWAIT)
     494             :                         return NULL;
     495             :                 /*
     496             :                  * Give up the CPU and sleep for a random short time to
      497             :                  * ensure that threads using a realtime scheduling class
     498             :                  * are migrated off the CPU, and thus off the hctx that
     499             :                  * is going away.
     500             :                  */
     501           0 :                 msleep(3);
     502           0 :                 goto retry;
     503             :         }
     504             : 
     505           0 :         return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
     506             :                                         alloc_time_ns);
     507             : }
     508             : 
     509           0 : struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
     510             :                 blk_mq_req_flags_t flags)
     511             : {
     512           0 :         struct blk_mq_alloc_data data = {
     513             :                 .q              = q,
     514             :                 .flags          = flags,
     515             :                 .cmd_flags      = op,
     516             :                 .nr_tags        = 1,
     517             :         };
     518             :         struct request *rq;
     519             :         int ret;
     520             : 
     521           0 :         ret = blk_queue_enter(q, flags);
     522           0 :         if (ret)
     523           0 :                 return ERR_PTR(ret);
     524             : 
     525           0 :         rq = __blk_mq_alloc_requests(&data);
     526           0 :         if (!rq)
     527             :                 goto out_queue_exit;
     528           0 :         rq->__data_len = 0;
     529           0 :         rq->__sector = (sector_t) -1;
     530           0 :         rq->bio = rq->biotail = NULL;
     531           0 :         return rq;
     532             : out_queue_exit:
     533           0 :         blk_queue_exit(q);
     534           0 :         return ERR_PTR(-EWOULDBLOCK);
     535             : }
     536             : EXPORT_SYMBOL(blk_mq_alloc_request);
     537             : 
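A minimal kernel-context sketch of the allocation API exported above: allocate a driver-private (passthrough) request without sleeping, then release it. A real caller would set up the command and issue it, for example via blk_execute_rq(), before freeing.

static int example_alloc_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -EWOULDBLOCK if no tag is free */

	/* ... fill in the payload and submit the request here ... */

	blk_mq_free_request(rq);
	return 0;
}
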
     538           0 : struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
     539             :         unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
     540             : {
     541           0 :         struct blk_mq_alloc_data data = {
     542             :                 .q              = q,
     543             :                 .flags          = flags,
     544             :                 .cmd_flags      = op,
     545             :                 .nr_tags        = 1,
     546             :         };
     547           0 :         u64 alloc_time_ns = 0;
     548             :         unsigned int cpu;
     549             :         unsigned int tag;
     550             :         int ret;
     551             : 
     552             :         /* alloc_time includes depth and tag waits */
     553             :         if (blk_queue_rq_alloc_time(q))
     554             :                 alloc_time_ns = ktime_get_ns();
     555             : 
     556             :         /*
     557             :          * If the tag allocator sleeps we could get an allocation for a
     558             :          * different hardware context.  No need to complicate the low level
     559             :          * allocator for this for the rare use case of a command tied to
     560             :          * a specific queue.
     561             :          */
     562           0 :         if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
     563             :                 return ERR_PTR(-EINVAL);
     564             : 
     565           0 :         if (hctx_idx >= q->nr_hw_queues)
     566             :                 return ERR_PTR(-EIO);
     567             : 
     568           0 :         ret = blk_queue_enter(q, flags);
     569           0 :         if (ret)
     570           0 :                 return ERR_PTR(ret);
     571             : 
     572             :         /*
     573             :          * Check if the hardware context is actually mapped to anything.
     574             :          * If not tell the caller that it should skip this queue.
     575             :          */
     576           0 :         ret = -EXDEV;
     577           0 :         data.hctx = xa_load(&q->hctx_table, hctx_idx);
     578           0 :         if (!blk_mq_hw_queue_mapped(data.hctx))
     579             :                 goto out_queue_exit;
     580           0 :         cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
     581           0 :         data.ctx = __blk_mq_get_ctx(q, cpu);
     582             : 
     583           0 :         if (!q->elevator)
     584           0 :                 blk_mq_tag_busy(data.hctx);
     585             :         else
     586           0 :                 data.rq_flags |= RQF_ELV;
     587             : 
     588           0 :         ret = -EWOULDBLOCK;
     589           0 :         tag = blk_mq_get_tag(&data);
     590           0 :         if (tag == BLK_MQ_NO_TAG)
     591             :                 goto out_queue_exit;
     592           0 :         return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
     593             :                                         alloc_time_ns);
     594             : 
     595             : out_queue_exit:
     596           0 :         blk_queue_exit(q);
     597           0 :         return ERR_PTR(ret);
     598             : }
     599             : EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
     600             : 
     601           0 : static void __blk_mq_free_request(struct request *rq)
     602             : {
     603           0 :         struct request_queue *q = rq->q;
     604           0 :         struct blk_mq_ctx *ctx = rq->mq_ctx;
     605           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
     606           0 :         const int sched_tag = rq->internal_tag;
     607             : 
     608           0 :         blk_crypto_free_request(rq);
     609           0 :         blk_pm_mark_last_busy(rq);
     610           0 :         rq->mq_hctx = NULL;
     611           0 :         if (rq->tag != BLK_MQ_NO_TAG)
     612           0 :                 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
     613           0 :         if (sched_tag != BLK_MQ_NO_TAG)
     614           0 :                 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
     615           0 :         blk_mq_sched_restart(hctx);
     616           0 :         blk_queue_exit(q);
     617           0 : }
     618             : 
     619           0 : void blk_mq_free_request(struct request *rq)
     620             : {
     621           0 :         struct request_queue *q = rq->q;
     622           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
     623             : 
     624           0 :         if ((rq->rq_flags & RQF_ELVPRIV) &&
     625           0 :             q->elevator->type->ops.finish_request)
     626           0 :                 q->elevator->type->ops.finish_request(rq);
     627             : 
     628           0 :         if (rq->rq_flags & RQF_MQ_INFLIGHT)
     629             :                 __blk_mq_dec_active_requests(hctx);
     630             : 
     631           0 :         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
     632           0 :                 laptop_io_completion(q->disk->bdi);
     633             : 
     634           0 :         rq_qos_done(q, rq);
     635             : 
     636           0 :         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
     637           0 :         if (req_ref_put_and_test(rq))
     638           0 :                 __blk_mq_free_request(rq);
     639           0 : }
     640             : EXPORT_SYMBOL_GPL(blk_mq_free_request);
     641             : 
     642           0 : void blk_mq_free_plug_rqs(struct blk_plug *plug)
     643             : {
     644             :         struct request *rq;
     645             : 
     646           0 :         while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
     647           0 :                 blk_mq_free_request(rq);
     648           0 : }
     649             : 
     650           0 : void blk_dump_rq_flags(struct request *rq, char *msg)
     651             : {
     652           0 :         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
     653             :                 rq->q->disk ? rq->q->disk->disk_name : "?",
     654             :                 (unsigned long long) rq->cmd_flags);
     655             : 
     656           0 :         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
     657             :                (unsigned long long)blk_rq_pos(rq),
     658             :                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
     659           0 :         printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
     660             :                rq->bio, rq->biotail, blk_rq_bytes(rq));
     661           0 : }
     662             : EXPORT_SYMBOL(blk_dump_rq_flags);
     663             : 
     664           0 : static void req_bio_endio(struct request *rq, struct bio *bio,
     665             :                           unsigned int nbytes, blk_status_t error)
     666             : {
     667           0 :         if (unlikely(error)) {
     668           0 :                 bio->bi_status = error;
     669           0 :         } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
     670             :                 /*
     671             :                  * Partial zone append completions cannot be supported as the
     672             :                  * BIO fragments may end up not being written sequentially.
     673             :                  */
     674           0 :                 if (bio->bi_iter.bi_size != nbytes)
     675           0 :                         bio->bi_status = BLK_STS_IOERR;
     676             :                 else
     677           0 :                         bio->bi_iter.bi_sector = rq->__sector;
     678             :         }
     679             : 
     680           0 :         bio_advance(bio, nbytes);
     681             : 
     682           0 :         if (unlikely(rq->rq_flags & RQF_QUIET))
     683             :                 bio_set_flag(bio, BIO_QUIET);
     684             :         /* don't actually finish bio if it's part of flush sequence */
     685           0 :         if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
     686           0 :                 bio_endio(bio);
     687           0 : }
     688             : 
     689           0 : static void blk_account_io_completion(struct request *req, unsigned int bytes)
     690             : {
     691           0 :         if (req->part && blk_do_io_stat(req)) {
     692           0 :                 const int sgrp = op_stat_group(req_op(req));
     693             : 
     694           0 :                 part_stat_lock();
     695           0 :                 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
     696           0 :                 part_stat_unlock();
     697             :         }
     698           0 : }
     699             : 
     700           0 : static void blk_print_req_error(struct request *req, blk_status_t status)
     701             : {
     702           0 :         printk_ratelimited(KERN_ERR
     703             :                 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
     704             :                 "phys_seg %u prio class %u\n",
     705             :                 blk_status_to_str(status),
     706             :                 req->q->disk ? req->q->disk->disk_name : "?",
     707             :                 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
     708             :                 req->cmd_flags & ~REQ_OP_MASK,
     709             :                 req->nr_phys_segments,
     710             :                 IOPRIO_PRIO_CLASS(req->ioprio));
     711           0 : }
     712             : 
     713             : /*
     714             :  * Fully end IO on a request. Does not support partial completions, or
     715             :  * errors.
     716             :  */
     717           0 : static void blk_complete_request(struct request *req)
     718             : {
     719           0 :         const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
     720           0 :         int total_bytes = blk_rq_bytes(req);
     721           0 :         struct bio *bio = req->bio;
     722             : 
     723           0 :         trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
     724             : 
     725           0 :         if (!bio)
     726             :                 return;
     727             : 
     728             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     729             :         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
     730             :                 req->q->integrity.profile->complete_fn(req, total_bytes);
     731             : #endif
     732             : 
     733           0 :         blk_account_io_completion(req, total_bytes);
     734             : 
     735             :         do {
     736           0 :                 struct bio *next = bio->bi_next;
     737             : 
     738             :                 /* Completion has already been traced */
     739           0 :                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
     740             : 
     741           0 :                 if (req_op(req) == REQ_OP_ZONE_APPEND)
     742           0 :                         bio->bi_iter.bi_sector = req->__sector;
     743             : 
     744           0 :                 if (!is_flush)
     745           0 :                         bio_endio(bio);
     746           0 :                 bio = next;
     747           0 :         } while (bio);
     748             : 
     749             :         /*
     750             :          * Reset counters so that the request stacking driver
     751             :          * can find how many bytes remain in the request
     752             :          * later.
     753             :          */
     754           0 :         req->bio = NULL;
     755           0 :         req->__data_len = 0;
     756             : }
     757             : 
     758             : /**
     759             :  * blk_update_request - Complete multiple bytes without completing the request
     760             :  * @req:      the request being processed
     761             :  * @error:    block status code
     762             :  * @nr_bytes: number of bytes to complete for @req
     763             :  *
     764             :  * Description:
     765             :  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
     766             :  *     the request structure even if @req doesn't have leftover.
     767             :  *     If @req has leftover, sets it up for the next range of segments.
     768             :  *
     769             :  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
     770             :  *     %false return from this function.
     771             :  *
     772             :  * Note:
     773             :  *      The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
     774             :  *      except in the consistency check at the end of this function.
     775             :  *
     776             :  * Return:
     777             :  *     %false - this request doesn't have any more data
     778             :  *     %true  - this request has more data
     779             :  **/
     780           0 : bool blk_update_request(struct request *req, blk_status_t error,
     781             :                 unsigned int nr_bytes)
     782             : {
     783             :         int total_bytes;
     784             : 
     785           0 :         trace_block_rq_complete(req, error, nr_bytes);
     786             : 
     787           0 :         if (!req->bio)
     788             :                 return false;
     789             : 
     790             : #ifdef CONFIG_BLK_DEV_INTEGRITY
     791             :         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
     792             :             error == BLK_STS_OK)
     793             :                 req->q->integrity.profile->complete_fn(req, nr_bytes);
     794             : #endif
     795             : 
     796           0 :         if (unlikely(error && !blk_rq_is_passthrough(req) &&
     797           0 :                      !(req->rq_flags & RQF_QUIET)) &&
     798           0 :                      !test_bit(GD_DEAD, &req->q->disk->state)) {
     799           0 :                 blk_print_req_error(req, error);
     800           0 :                 trace_block_rq_error(req, error, nr_bytes);
     801             :         }
     802             : 
     803           0 :         blk_account_io_completion(req, nr_bytes);
     804             : 
     805           0 :         total_bytes = 0;
     806           0 :         while (req->bio) {
     807           0 :                 struct bio *bio = req->bio;
     808           0 :                 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
     809             : 
     810           0 :                 if (bio_bytes == bio->bi_iter.bi_size)
     811           0 :                         req->bio = bio->bi_next;
     812             : 
     813             :                 /* Completion has already been traced */
     814           0 :                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
     815           0 :                 req_bio_endio(req, bio, bio_bytes, error);
     816             : 
     817           0 :                 total_bytes += bio_bytes;
     818           0 :                 nr_bytes -= bio_bytes;
     819             : 
     820           0 :                 if (!nr_bytes)
     821             :                         break;
     822             :         }
     823             : 
     824             :         /*
     825             :          * completely done
     826             :          */
     827           0 :         if (!req->bio) {
     828             :                 /*
     829             :                  * Reset counters so that the request stacking driver
     830             :                  * can find how many bytes remain in the request
     831             :                  * later.
     832             :                  */
     833           0 :                 req->__data_len = 0;
     834           0 :                 return false;
     835             :         }
     836             : 
     837           0 :         req->__data_len -= total_bytes;
     838             : 
     839             :         /* update sector only for requests with clear definition of sector */
     840           0 :         if (!blk_rq_is_passthrough(req))
     841           0 :                 req->__sector += total_bytes >> 9;
     842             : 
     843             :         /* mixed attributes always follow the first bio */
     844           0 :         if (req->rq_flags & RQF_MIXED_MERGE) {
     845           0 :                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
     846           0 :                 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
     847             :         }
     848             : 
     849           0 :         if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
     850             :                 /*
     851             :                  * If total number of sectors is less than the first segment
     852             :                  * size, something has gone terribly wrong.
     853             :                  */
     854           0 :                 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
     855           0 :                         blk_dump_rq_flags(req, "request botched");
     856           0 :                         req->__data_len = blk_rq_cur_bytes(req);
     857             :                 }
     858             : 
     859             :                 /* recalculate the number of segments */
     860           0 :                 req->nr_phys_segments = blk_recalc_rq_segments(req);
     861             :         }
     862             : 
     863             :         return true;
     864             : }
     865             : EXPORT_SYMBOL_GPL(blk_update_request);
     866             : 
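A minimal kernel-context sketch of the contract documented above: when only part of a request is done, blk_update_request() returns true and the trimmed remainder can be requeued; when it returns false the request is finished and can be ended. Here 'done' stands for the number of bytes the hardware actually transferred; this is an illustrative pattern, not code from this file.

static void example_partial_done(struct request *rq, unsigned int done)
{
	if (blk_update_request(rq, BLK_STS_OK, done)) {
		/* bytes remain: requeue the trimmed request for another pass */
		blk_mq_requeue_request(rq, true);
	} else {
		/* all bios ended: finish accounting and release the request */
		__blk_mq_end_request(rq, BLK_STS_OK);
	}
}
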
     867           0 : static void __blk_account_io_done(struct request *req, u64 now)
     868             : {
     869           0 :         const int sgrp = op_stat_group(req_op(req));
     870             : 
     871           0 :         part_stat_lock();
     872           0 :         update_io_ticks(req->part, jiffies, true);
     873           0 :         part_stat_inc(req->part, ios[sgrp]);
     874           0 :         part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
     875           0 :         part_stat_unlock();
     876           0 : }
     877             : 
     878           0 : static inline void blk_account_io_done(struct request *req, u64 now)
     879             : {
     880             :         /*
     881             :          * Account IO completion.  flush_rq isn't accounted as a
     882             :          * normal IO on queueing nor completion.  Accounting the
     883             :          * containing request is enough.
     884             :          */
     885           0 :         if (blk_do_io_stat(req) && req->part &&
     886           0 :             !(req->rq_flags & RQF_FLUSH_SEQ))
     887           0 :                 __blk_account_io_done(req, now);
     888           0 : }
     889             : 
     890           0 : static void __blk_account_io_start(struct request *rq)
     891             : {
     892             :         /*
     893             :          * All non-passthrough requests are created from a bio with one
     894             :          * exception: when a flush command that is part of a flush sequence
     895             :          * generated by the state machine in blk-flush.c is cloned onto the
     896             :          * lower device by dm-multipath we can get here without a bio.
     897             :          */
     898           0 :         if (rq->bio)
     899           0 :                 rq->part = rq->bio->bi_bdev;
     900             :         else
     901           0 :                 rq->part = rq->q->disk->part0;
     902             : 
     903           0 :         part_stat_lock();
     904           0 :         update_io_ticks(rq->part, jiffies, false);
     905           0 :         part_stat_unlock();
     906           0 : }
     907             : 
     908           0 : static inline void blk_account_io_start(struct request *req)
     909             : {
     910           0 :         if (blk_do_io_stat(req))
     911           0 :                 __blk_account_io_start(req);
     912           0 : }
     913             : 
     914           0 : static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
     915             : {
     916           0 :         if (rq->rq_flags & RQF_STATS) {
     917           0 :                 blk_mq_poll_stats_start(rq->q);
     918           0 :                 blk_stat_add(rq, now);
     919             :         }
     920             : 
     921           0 :         blk_mq_sched_completed_request(rq, now);
     922           0 :         blk_account_io_done(rq, now);
     923           0 : }
     924             : 
     925           0 : inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
     926             : {
     927           0 :         if (blk_mq_need_time_stamp(rq))
     928           0 :                 __blk_mq_end_request_acct(rq, ktime_get_ns());
     929             : 
     930           0 :         if (rq->end_io) {
     931           0 :                 rq_qos_done(rq->q, rq);
     932           0 :                 rq->end_io(rq, error);
     933             :         } else {
     934           0 :                 blk_mq_free_request(rq);
     935             :         }
     936           0 : }
     937             : EXPORT_SYMBOL(__blk_mq_end_request);
     938             : 
     939           0 : void blk_mq_end_request(struct request *rq, blk_status_t error)
     940             : {
     941           0 :         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
     942           0 :                 BUG();
     943           0 :         __blk_mq_end_request(rq, error);
     944           0 : }
     945             : EXPORT_SYMBOL(blk_mq_end_request);
     946             : 
     947             : #define TAG_COMP_BATCH          32
     948             : 
     949           0 : static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
     950             :                                           int *tag_array, int nr_tags)
     951             : {
     952           0 :         struct request_queue *q = hctx->queue;
     953             : 
     954             :         /*
     955             :          * All requests should have been marked as RQF_MQ_INFLIGHT, so
     956             :          * update hctx->nr_active in batch
     957             :          */
     958           0 :         if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
     959             :                 __blk_mq_sub_active_requests(hctx, nr_tags);
     960             : 
     961           0 :         blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
     962           0 :         percpu_ref_put_many(&q->q_usage_counter, nr_tags);
     963           0 : }
     964             : 
     965           0 : void blk_mq_end_request_batch(struct io_comp_batch *iob)
     966             : {
     967           0 :         int tags[TAG_COMP_BATCH], nr_tags = 0;
     968           0 :         struct blk_mq_hw_ctx *cur_hctx = NULL;
     969             :         struct request *rq;
     970           0 :         u64 now = 0;
     971             : 
     972           0 :         if (iob->need_ts)
     973           0 :                 now = ktime_get_ns();
     974             : 
     975           0 :         while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
     976           0 :                 prefetch(rq->bio);
     977           0 :                 prefetch(rq->rq_next);
     978             : 
     979           0 :                 blk_complete_request(rq);
     980           0 :                 if (iob->need_ts)
     981           0 :                         __blk_mq_end_request_acct(rq, now);
     982             : 
     983           0 :                 rq_qos_done(rq->q, rq);
     984             : 
     985           0 :                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
     986           0 :                 if (!req_ref_put_and_test(rq))
     987           0 :                         continue;
     988             : 
     989           0 :                 blk_crypto_free_request(rq);
     990           0 :                 blk_pm_mark_last_busy(rq);
     991             : 
     992           0 :                 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
     993           0 :                         if (cur_hctx)
     994           0 :                                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
     995           0 :                         nr_tags = 0;
     996           0 :                         cur_hctx = rq->mq_hctx;
     997             :                 }
     998           0 :                 tags[nr_tags++] = rq->tag;
     999             :         }
    1000             : 
    1001           0 :         if (nr_tags)
    1002           0 :                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
    1003           0 : }
    1004             : EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
    1005             : 
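A hedged kernel-context sketch of how a driver completion path can feed blk_mq_end_request_batch(): completed requests are collected into an io_comp_batch via blk_mq_add_to_batch() (from blk-mq.h) so that tags and queue references are released in bulk. hw_next_completed() is a hypothetical per-driver helper returning the next finished request; real users (e.g. NVMe) wrap the batch completion in a driver callback that unmaps DMA first.

static void example_process_completions(struct blk_mq_hw_ctx *hctx)
{
	DEFINE_IO_COMP_BATCH(iob);
	struct request *rq;

	while ((rq = hw_next_completed(hctx)) != NULL) {
		/* fall back to the regular completion path if batching is refused */
		if (!blk_mq_add_to_batch(rq, &iob, 0, blk_mq_end_request_batch))
			blk_mq_complete_request(rq);
	}
	blk_mq_end_request_batch(&iob);		/* flushes the collected requests */
}
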
    1006           0 : static void blk_complete_reqs(struct llist_head *list)
    1007             : {
    1008           0 :         struct llist_node *entry = llist_reverse_order(llist_del_all(list));
    1009             :         struct request *rq, *next;
    1010             : 
    1011           0 :         llist_for_each_entry_safe(rq, next, entry, ipi_list)
    1012           0 :                 rq->q->mq_ops->complete(rq);
    1013           0 : }
    1014             : 
    1015           0 : static __latent_entropy void blk_done_softirq(struct softirq_action *h)
    1016             : {
    1017           0 :         blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
    1018           0 : }
    1019             : 
    1020           0 : static int blk_softirq_cpu_dead(unsigned int cpu)
    1021             : {
    1022           0 :         blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
    1023           0 :         return 0;
    1024             : }
    1025             : 
    1026             : static void __blk_mq_complete_request_remote(void *data)
    1027             : {
    1028             :         __raise_softirq_irqoff(BLOCK_SOFTIRQ);
    1029             : }
    1030             : 
    1031             : static inline bool blk_mq_complete_need_ipi(struct request *rq)
    1032             : {
    1033           0 :         int cpu = raw_smp_processor_id();
    1034             : 
    1035             :         if (!IS_ENABLED(CONFIG_SMP) ||
    1036             :             !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
    1037             :                 return false;
    1038             :         /*
    1039             :          * With force threaded interrupts enabled, raising softirq from an SMP
    1040             :          * function call will always result in waking the ksoftirqd thread.
    1041             :          * This is probably worse than completing the request on a different
    1042             :          * cache domain.
    1043             :          */
    1044             :         if (force_irqthreads())
    1045             :                 return false;
    1046             : 
    1047             :         /* same CPU or cache domain?  Complete locally */
    1048             :         if (cpu == rq->mq_ctx->cpu ||
    1049             :             (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
    1050             :              cpus_share_cache(cpu, rq->mq_ctx->cpu)))
    1051             :                 return false;
    1052             : 
    1053             :         /* don't try to IPI to an offline CPU */
    1054             :         return cpu_online(rq->mq_ctx->cpu);
    1055             : }
    1056             : 
    1057             : static void blk_mq_complete_send_ipi(struct request *rq)
    1058             : {
    1059             :         struct llist_head *list;
    1060             :         unsigned int cpu;
    1061             : 
    1062             :         cpu = rq->mq_ctx->cpu;
    1063             :         list = &per_cpu(blk_cpu_done, cpu);
    1064             :         if (llist_add(&rq->ipi_list, list)) {
    1065             :                 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
    1066             :                 smp_call_function_single_async(cpu, &rq->csd);
    1067             :         }
    1068             : }
    1069             : 
    1070           0 : static void blk_mq_raise_softirq(struct request *rq)
    1071             : {
    1072             :         struct llist_head *list;
    1073             : 
    1074           0 :         preempt_disable();
    1075           0 :         list = this_cpu_ptr(&blk_cpu_done);
    1076           0 :         if (llist_add(&rq->ipi_list, list))
    1077           0 :                 raise_softirq(BLOCK_SOFTIRQ);
    1078           0 :         preempt_enable();
    1079           0 : }
    1080             : 
    1081           0 : bool blk_mq_complete_request_remote(struct request *rq)
    1082             : {
    1083           0 :         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
    1084             : 
    1085             :         /*
     1086             :          * For a polled request, always complete locally; it's pointless
    1087             :          * to redirect the completion.
    1088             :          */
    1089           0 :         if (rq->cmd_flags & REQ_POLLED)
    1090             :                 return false;
    1091             : 
    1092           0 :         if (blk_mq_complete_need_ipi(rq)) {
    1093             :                 blk_mq_complete_send_ipi(rq);
    1094             :                 return true;
    1095             :         }
    1096             : 
    1097           0 :         if (rq->q->nr_hw_queues == 1) {
    1098           0 :                 blk_mq_raise_softirq(rq);
    1099           0 :                 return true;
    1100             :         }
    1101             :         return false;
    1102             : }
    1103             : EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
    1104             : 
    1105             : /**
    1106             :  * blk_mq_complete_request - end I/O on a request
    1107             :  * @rq:         the request being processed
    1108             :  *
    1109             :  * Description:
     1110             :  *      Complete a request by scheduling the ->complete() operation.
    1111             :  **/
    1112           0 : void blk_mq_complete_request(struct request *rq)
    1113             : {
    1114           0 :         if (!blk_mq_complete_request_remote(rq))
    1115           0 :                 rq->q->mq_ops->complete(rq);
    1116           0 : }
    1117             : EXPORT_SYMBOL(blk_mq_complete_request);
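
For completeness, a hedged sketch of the typical caller: a driver's interrupt handler hands each finished request back via blk_mq_complete_request(), and the remote-completion logic above decides whether ->complete() runs locally, over an IPI, or via the block softirq. my_drv_irq(), my_drv_queue and my_drv_pop_completion() are hypothetical names.

#include <linux/blk-mq.h>
#include <linux/interrupt.h>

static irqreturn_t my_drv_irq(int irq, void *data)
{
        struct my_drv_queue *dq = data;         /* hypothetical per-queue state */
        struct request *rq;

        /* Each completed request is routed through blk_mq_complete_request(),
         * which ends up invoking the driver's ->complete() callback.
         */
        while ((rq = my_drv_pop_completion(dq)) != NULL)
                blk_mq_complete_request(rq);

        return IRQ_HANDLED;
}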
    1118             : 
    1119             : /**
    1120             :  * blk_mq_start_request - Start processing a request
    1121             :  * @rq: Pointer to request to be started
    1122             :  *
    1123             :  * Function used by device drivers to notify the block layer that a request
     1124             :  * is going to be processed now, so the block layer can do proper initialization,
     1125             :  * such as starting the timeout timer.
    1126             :  */
    1127           0 : void blk_mq_start_request(struct request *rq)
    1128             : {
    1129           0 :         struct request_queue *q = rq->q;
    1130             : 
    1131           0 :         trace_block_rq_issue(rq);
    1132             : 
    1133           0 :         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
    1134           0 :                 rq->io_start_time_ns = ktime_get_ns();
    1135           0 :                 rq->stats_sectors = blk_rq_sectors(rq);
    1136           0 :                 rq->rq_flags |= RQF_STATS;
    1137             :                 rq_qos_issue(q, rq);
    1138             :         }
    1139             : 
    1140           0 :         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
    1141             : 
    1142           0 :         blk_add_timer(rq);
    1143           0 :         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
    1144             : 
    1145             : #ifdef CONFIG_BLK_DEV_INTEGRITY
    1146             :         if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
    1147             :                 q->integrity.profile->prepare_fn(rq);
    1148             : #endif
    1149           0 :         if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
    1150           0 :                 WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
    1151           0 : }
    1152             : EXPORT_SYMBOL(blk_mq_start_request);
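
A hedged sketch of the consumer side: a driver's ->queue_rq() implementation calls blk_mq_start_request() before handing the request to hardware, so the timeout timer (and the optional I/O statistics window) covers the whole in-flight period. my_drv_submit() is a stand-in for the driver's actual submission path.

#include <linux/blk-mq.h>

static blk_status_t my_drv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);       /* arms the timeout, marks the rq in flight */

        /* Hypothetical hardware submission; a transient failure is reported
         * back as BLK_STS_RESOURCE so blk-mq requeues and retries later.
         */
        if (!my_drv_submit(hctx->driver_data, rq))
                return BLK_STS_RESOURCE;

        return BLK_STS_OK;
}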
    1153             : 
    1154             : /**
    1155             :  * blk_end_sync_rq - executes a completion event on a request
    1156             :  * @rq: request to complete
    1157             :  * @error: end I/O status of the request
    1158             :  */
    1159           0 : static void blk_end_sync_rq(struct request *rq, blk_status_t error)
    1160             : {
    1161           0 :         struct completion *waiting = rq->end_io_data;
    1162             : 
    1163           0 :         rq->end_io_data = (void *)(uintptr_t)error;
    1164             : 
    1165             :         /*
     1166             :          * Complete last: if this is an on-stack request, the process (and thus
     1167             :          * the rq pointer) could become invalid right after this complete().
    1168             :          */
    1169           0 :         complete(waiting);
    1170           0 : }
    1171             : 
    1172             : /**
    1173             :  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
    1174             :  * @rq:         request to insert
    1175             :  * @at_head:    insert request at head or tail of queue
    1176             :  * @done:       I/O completion handler
    1177             :  *
    1178             :  * Description:
    1179             :  *    Insert a fully prepared request at the back of the I/O scheduler queue
    1180             :  *    for execution.  Don't wait for completion.
    1181             :  *
    1182             :  * Note:
    1183             :  *    This function will invoke @done directly if the queue is dead.
    1184             :  */
    1185           0 : void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
    1186             : {
    1187           0 :         WARN_ON(irqs_disabled());
    1188           0 :         WARN_ON(!blk_rq_is_passthrough(rq));
    1189             : 
    1190           0 :         rq->end_io = done;
    1191             : 
    1192           0 :         blk_account_io_start(rq);
    1193             : 
    1194             :         /*
     1195             :          * Don't check the dying flag for MQ because the request won't
     1196             :          * be reused after the dying flag is set.
    1197             :          */
    1198           0 :         blk_mq_sched_insert_request(rq, at_head, true, false);
    1199           0 : }
    1200             : EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
    1201             : 
    1202           0 : static bool blk_rq_is_poll(struct request *rq)
    1203             : {
    1204           0 :         if (!rq->mq_hctx)
    1205             :                 return false;
    1206           0 :         if (rq->mq_hctx->type != HCTX_TYPE_POLL)
    1207             :                 return false;
    1208           0 :         if (WARN_ON_ONCE(!rq->bio))
    1209             :                 return false;
    1210             :         return true;
    1211             : }
    1212             : 
    1213           0 : static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
    1214             : {
    1215             :         do {
    1216           0 :                 bio_poll(rq->bio, NULL, 0);
    1217           0 :                 cond_resched();
    1218           0 :         } while (!completion_done(wait));
    1219           0 : }
    1220             : 
    1221             : /**
    1222             :  * blk_execute_rq - insert a request into queue for execution
    1223             :  * @rq:         request to insert
    1224             :  * @at_head:    insert request at head or tail of queue
    1225             :  *
    1226             :  * Description:
    1227             :  *    Insert a fully prepared request at the back of the I/O scheduler queue
    1228             :  *    for execution and wait for completion.
    1229             :  * Return: The blk_status_t result provided to blk_mq_end_request().
    1230             :  */
    1231           0 : blk_status_t blk_execute_rq(struct request *rq, bool at_head)
    1232             : {
    1233           0 :         DECLARE_COMPLETION_ONSTACK(wait);
    1234             :         unsigned long hang_check;
    1235             : 
    1236           0 :         rq->end_io_data = &wait;
    1237           0 :         blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);
    1238             : 
    1239             :         /* Prevent hang_check timer from firing at us during very long I/O */
    1240           0 :         hang_check = sysctl_hung_task_timeout_secs;
    1241             : 
    1242           0 :         if (blk_rq_is_poll(rq))
    1243           0 :                 blk_rq_poll_completion(rq, &wait);
    1244             :         else if (hang_check)
    1245             :                 while (!wait_for_completion_io_timeout(&wait,
    1246             :                                 hang_check * (HZ/2)))
    1247             :                         ;
    1248             :         else
    1249           0 :                 wait_for_completion_io(&wait);
    1250             : 
    1251           0 :         return (blk_status_t)(uintptr_t)rq->end_io_data;
    1252             : }
    1253             : EXPORT_SYMBOL(blk_execute_rq);
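
A hedged sketch of the usual synchronous caller: allocate a passthrough request, fill in the driver-specific command, execute it with blk_execute_rq() and free it. REQ_OP_DRV_IN and the error handling here are illustrative; the exact opcode and payload setup are driver specific, and my_drv_sync_cmd() is a hypothetical helper.

#include <linux/blk-mq.h>
#include <linux/err.h>

static blk_status_t my_drv_sync_cmd(struct request_queue *q)
{
        struct request *rq;
        blk_status_t status;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return BLK_STS_RESOURCE;        /* illustrative error mapping */

        /* Driver-specific command setup (payload, rq->timeout, ...) goes here. */

        status = blk_execute_rq(rq, false);     /* at_head == false, waits for completion */
        blk_mq_free_request(rq);
        return status;
}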
    1254             : 
    1255           0 : static void __blk_mq_requeue_request(struct request *rq)
    1256             : {
    1257           0 :         struct request_queue *q = rq->q;
    1258             : 
    1259           0 :         blk_mq_put_driver_tag(rq);
    1260             : 
    1261           0 :         trace_block_rq_requeue(rq);
    1262           0 :         rq_qos_requeue(q, rq);
    1263             : 
    1264           0 :         if (blk_mq_request_started(rq)) {
    1265           0 :                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
    1266           0 :                 rq->rq_flags &= ~RQF_TIMED_OUT;
    1267             :         }
    1268           0 : }
    1269             : 
    1270           0 : void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
    1271             : {
    1272           0 :         __blk_mq_requeue_request(rq);
    1273             : 
    1274             :         /* this request will be re-inserted to io scheduler queue */
    1275           0 :         blk_mq_sched_requeue_request(rq);
    1276             : 
    1277           0 :         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
    1278           0 : }
    1279             : EXPORT_SYMBOL(blk_mq_requeue_request);
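
As a hedged usage sketch: a driver retrying a transient failure typically calls blk_mq_requeue_request() with kick_requeue_list set, so the requeue work runs right away instead of waiting for a later blk_mq_kick_requeue_list() call. my_drv_retry() is a hypothetical helper.

#include <linux/blk-mq.h>

static void my_drv_retry(struct request *rq)
{
        /* Optionally adjust retry counters or rq->timeout before requeueing. */
        blk_mq_requeue_request(rq, true);       /* true: kick the requeue work now */
}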
    1280             : 
    1281           0 : static void blk_mq_requeue_work(struct work_struct *work)
    1282             : {
    1283           0 :         struct request_queue *q =
    1284           0 :                 container_of(work, struct request_queue, requeue_work.work);
    1285           0 :         LIST_HEAD(rq_list);
    1286             :         struct request *rq, *next;
    1287             : 
    1288           0 :         spin_lock_irq(&q->requeue_lock);
    1289           0 :         list_splice_init(&q->requeue_list, &rq_list);
    1290           0 :         spin_unlock_irq(&q->requeue_lock);
    1291             : 
    1292           0 :         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
    1293           0 :                 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
    1294           0 :                         continue;
    1295             : 
    1296           0 :                 rq->rq_flags &= ~RQF_SOFTBARRIER;
    1297           0 :                 list_del_init(&rq->queuelist);
    1298             :                 /*
     1299             :                  * If RQF_DONTPREP is set, rq already contains some driver-specific
     1300             :                  * data, so insert it into the hctx dispatch list to avoid any
    1301             :                  * merge.
    1302             :                  */
    1303           0 :                 if (rq->rq_flags & RQF_DONTPREP)
    1304           0 :                         blk_mq_request_bypass_insert(rq, false, false);
    1305             :                 else
    1306           0 :                         blk_mq_sched_insert_request(rq, true, false, false);
    1307             :         }
    1308             : 
    1309           0 :         while (!list_empty(&rq_list)) {
    1310           0 :                 rq = list_entry(rq_list.next, struct request, queuelist);
    1311           0 :                 list_del_init(&rq->queuelist);
    1312           0 :                 blk_mq_sched_insert_request(rq, false, false, false);
    1313             :         }
    1314             : 
    1315           0 :         blk_mq_run_hw_queues(q, false);
    1316           0 : }
    1317             : 
    1318           0 : void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
    1319             :                                 bool kick_requeue_list)
    1320             : {
    1321           0 :         struct request_queue *q = rq->q;
    1322             :         unsigned long flags;
    1323             : 
    1324             :         /*
    1325             :          * We abuse this flag that is otherwise used by the I/O scheduler to
    1326             :          * request head insertion from the workqueue.
    1327             :          */
    1328           0 :         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
    1329             : 
    1330           0 :         spin_lock_irqsave(&q->requeue_lock, flags);
    1331           0 :         if (at_head) {
    1332           0 :                 rq->rq_flags |= RQF_SOFTBARRIER;
    1333           0 :                 list_add(&rq->queuelist, &q->requeue_list);
    1334             :         } else {
    1335           0 :                 list_add_tail(&rq->queuelist, &q->requeue_list);
    1336             :         }
    1337           0 :         spin_unlock_irqrestore(&q->requeue_lock, flags);
    1338             : 
    1339           0 :         if (kick_requeue_list)
    1340             :                 blk_mq_kick_requeue_list(q);
    1341           0 : }
    1342             : 
    1343           0 : void blk_mq_kick_requeue_list(struct request_queue *q)
    1344             : {
    1345           0 :         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
    1346           0 : }
    1347             : EXPORT_SYMBOL(blk_mq_kick_requeue_list);
    1348             : 
    1349           0 : void blk_mq_delay_kick_requeue_list(struct request_queue *q,
    1350             :                                     unsigned long msecs)
    1351             : {
    1352           0 :         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
    1353             :                                     msecs_to_jiffies(msecs));
    1354           0 : }
    1355             : EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
    1356             : 
    1357           0 : static bool blk_mq_rq_inflight(struct request *rq, void *priv,
    1358             :                                bool reserved)
    1359             : {
    1360             :         /*
     1361             :          * If we find a request that isn't idle, we know the queue is busy,
     1362             :          * since the request state is checked during the iteration.
    1363             :          * Return false to stop the iteration.
    1364             :          */
    1365           0 :         if (blk_mq_request_started(rq)) {
    1366           0 :                 bool *busy = priv;
    1367             : 
    1368           0 :                 *busy = true;
    1369           0 :                 return false;
    1370             :         }
    1371             : 
    1372             :         return true;
    1373             : }
    1374             : 
    1375           0 : bool blk_mq_queue_inflight(struct request_queue *q)
    1376             : {
    1377           0 :         bool busy = false;
    1378             : 
    1379           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
    1380           0 :         return busy;
    1381             : }
    1382             : EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
    1383             : 
    1384           0 : static void blk_mq_rq_timed_out(struct request *req, bool reserved)
    1385             : {
    1386           0 :         req->rq_flags |= RQF_TIMED_OUT;
    1387           0 :         if (req->q->mq_ops->timeout) {
    1388             :                 enum blk_eh_timer_return ret;
    1389             : 
    1390           0 :                 ret = req->q->mq_ops->timeout(req, reserved);
    1391           0 :                 if (ret == BLK_EH_DONE)
    1392             :                         return;
    1393           0 :                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
    1394             :         }
    1395             : 
    1396           0 :         blk_add_timer(req);
    1397             : }
    1398             : 
    1399             : static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
    1400             : {
    1401             :         unsigned long deadline;
    1402             : 
    1403           0 :         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
    1404             :                 return false;
    1405           0 :         if (rq->rq_flags & RQF_TIMED_OUT)
    1406             :                 return false;
    1407             : 
    1408           0 :         deadline = READ_ONCE(rq->deadline);
    1409           0 :         if (time_after_eq(jiffies, deadline))
    1410             :                 return true;
    1411             : 
    1412           0 :         if (*next == 0)
    1413           0 :                 *next = deadline;
    1414           0 :         else if (time_after(*next, deadline))
    1415           0 :                 *next = deadline;
    1416             :         return false;
    1417             : }
    1418             : 
    1419           0 : void blk_mq_put_rq_ref(struct request *rq)
    1420             : {
    1421           0 :         if (is_flush_rq(rq))
    1422           0 :                 rq->end_io(rq, 0);
    1423           0 :         else if (req_ref_put_and_test(rq))
    1424           0 :                 __blk_mq_free_request(rq);
    1425           0 : }
    1426             : 
    1427           0 : static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
    1428             : {
    1429           0 :         unsigned long *next = priv;
    1430             : 
    1431             :         /*
    1432             :          * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
     1433             :          * be reallocated underneath the timeout handler's processing, so
    1434             :          * the expire check is reliable. If the request is not expired, then
    1435             :          * it was completed and reallocated as a new request after returning
    1436             :          * from blk_mq_check_expired().
    1437             :          */
    1438           0 :         if (blk_mq_req_expired(rq, next))
    1439           0 :                 blk_mq_rq_timed_out(rq, reserved);
    1440           0 :         return true;
    1441             : }
    1442             : 
    1443           0 : static void blk_mq_timeout_work(struct work_struct *work)
    1444             : {
    1445           0 :         struct request_queue *q =
    1446           0 :                 container_of(work, struct request_queue, timeout_work);
    1447           0 :         unsigned long next = 0;
    1448             :         struct blk_mq_hw_ctx *hctx;
    1449             :         unsigned long i;
    1450             : 
    1451             :         /* A deadlock might occur if a request is stuck requiring a
     1452             :          * timeout at the same time a queue freeze is waiting for
    1453             :          * completion, since the timeout code would not be able to
    1454             :          * acquire the queue reference here.
    1455             :          *
    1456             :          * That's why we don't use blk_queue_enter here; instead, we use
    1457             :          * percpu_ref_tryget directly, because we need to be able to
    1458             :          * obtain a reference even in the short window between the queue
    1459             :          * starting to freeze, by dropping the first reference in
    1460             :          * blk_freeze_queue_start, and the moment the last request is
    1461             :          * consumed, marked by the instant q_usage_counter reaches
    1462             :          * zero.
    1463             :          */
    1464           0 :         if (!percpu_ref_tryget(&q->q_usage_counter))
    1465           0 :                 return;
    1466             : 
    1467           0 :         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
    1468             : 
    1469           0 :         if (next != 0) {
    1470           0 :                 mod_timer(&q->timeout, next);
    1471             :         } else {
    1472             :                 /*
    1473             :                  * Request timeouts are handled as a forward rolling timer. If
    1474             :                  * we end up here it means that no requests are pending and
    1475             :                  * also that no request has been pending for a while. Mark
    1476             :                  * each hctx as idle.
    1477             :                  */
    1478           0 :                 queue_for_each_hw_ctx(q, hctx, i) {
    1479             :                         /* the hctx may be unmapped, so check it here */
    1480           0 :                         if (blk_mq_hw_queue_mapped(hctx))
    1481             :                                 blk_mq_tag_idle(hctx);
    1482             :                 }
    1483             :         }
    1484           0 :         blk_queue_exit(q);
    1485             : }
    1486             : 
    1487             : struct flush_busy_ctx_data {
    1488             :         struct blk_mq_hw_ctx *hctx;
    1489             :         struct list_head *list;
    1490             : };
    1491             : 
    1492           0 : static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
    1493             : {
    1494           0 :         struct flush_busy_ctx_data *flush_data = data;
    1495           0 :         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
    1496           0 :         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
    1497           0 :         enum hctx_type type = hctx->type;
    1498             : 
    1499           0 :         spin_lock(&ctx->lock);
    1500           0 :         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
    1501           0 :         sbitmap_clear_bit(sb, bitnr);
    1502           0 :         spin_unlock(&ctx->lock);
    1503           0 :         return true;
    1504             : }
    1505             : 
    1506             : /*
    1507             :  * Process software queues that have been marked busy, splicing them
     1508             :  * to the for-dispatch list.
    1509             :  */
    1510           0 : void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
    1511             : {
    1512           0 :         struct flush_busy_ctx_data data = {
    1513             :                 .hctx = hctx,
    1514             :                 .list = list,
    1515             :         };
    1516             : 
    1517           0 :         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
    1518           0 : }
    1519             : EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
    1520             : 
    1521             : struct dispatch_rq_data {
    1522             :         struct blk_mq_hw_ctx *hctx;
    1523             :         struct request *rq;
    1524             : };
    1525             : 
    1526           0 : static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
    1527             :                 void *data)
    1528             : {
    1529           0 :         struct dispatch_rq_data *dispatch_data = data;
    1530           0 :         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
    1531           0 :         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
    1532           0 :         enum hctx_type type = hctx->type;
    1533             : 
    1534           0 :         spin_lock(&ctx->lock);
    1535           0 :         if (!list_empty(&ctx->rq_lists[type])) {
    1536           0 :                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
    1537           0 :                 list_del_init(&dispatch_data->rq->queuelist);
    1538           0 :                 if (list_empty(&ctx->rq_lists[type]))
    1539             :                         sbitmap_clear_bit(sb, bitnr);
    1540             :         }
    1541           0 :         spin_unlock(&ctx->lock);
    1542             : 
    1543           0 :         return !dispatch_data->rq;
    1544             : }
    1545             : 
    1546           0 : struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
    1547             :                                         struct blk_mq_ctx *start)
    1548             : {
    1549           0 :         unsigned off = start ? start->index_hw[hctx->type] : 0;
    1550           0 :         struct dispatch_rq_data data = {
    1551             :                 .hctx = hctx,
    1552             :                 .rq   = NULL,
    1553             :         };
    1554             : 
    1555           0 :         __sbitmap_for_each_set(&hctx->ctx_map, off,
    1556             :                                dispatch_rq_from_ctx, &data);
    1557             : 
    1558           0 :         return data.rq;
    1559             : }
    1560             : 
    1561           0 : static bool __blk_mq_alloc_driver_tag(struct request *rq)
    1562             : {
    1563           0 :         struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
    1564           0 :         unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
    1565             :         int tag;
    1566             : 
    1567           0 :         blk_mq_tag_busy(rq->mq_hctx);
    1568             : 
    1569           0 :         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
    1570           0 :                 bt = &rq->mq_hctx->tags->breserved_tags;
    1571           0 :                 tag_offset = 0;
    1572             :         } else {
    1573           0 :                 if (!hctx_may_queue(rq->mq_hctx, bt))
    1574             :                         return false;
    1575             :         }
    1576             : 
    1577           0 :         tag = __sbitmap_queue_get(bt);
    1578           0 :         if (tag == BLK_MQ_NO_TAG)
    1579             :                 return false;
    1580             : 
    1581           0 :         rq->tag = tag + tag_offset;
    1582           0 :         return true;
    1583             : }
    1584             : 
    1585           0 : bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
    1586             : {
    1587           0 :         if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
    1588             :                 return false;
    1589             : 
    1590           0 :         if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
    1591           0 :                         !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
    1592           0 :                 rq->rq_flags |= RQF_MQ_INFLIGHT;
    1593             :                 __blk_mq_inc_active_requests(hctx);
    1594             :         }
    1595           0 :         hctx->tags->rqs[rq->tag] = rq;
    1596           0 :         return true;
    1597             : }
    1598             : 
    1599           0 : static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
    1600             :                                 int flags, void *key)
    1601             : {
    1602             :         struct blk_mq_hw_ctx *hctx;
    1603             : 
    1604           0 :         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
    1605             : 
    1606           0 :         spin_lock(&hctx->dispatch_wait_lock);
    1607           0 :         if (!list_empty(&wait->entry)) {
    1608             :                 struct sbitmap_queue *sbq;
    1609             : 
    1610           0 :                 list_del_init(&wait->entry);
    1611           0 :                 sbq = &hctx->tags->bitmap_tags;
    1612           0 :                 atomic_dec(&sbq->ws_active);
    1613             :         }
    1614           0 :         spin_unlock(&hctx->dispatch_wait_lock);
    1615             : 
    1616           0 :         blk_mq_run_hw_queue(hctx, true);
    1617           0 :         return 1;
    1618             : }
    1619             : 
    1620             : /*
    1621             :  * Mark us waiting for a tag. For shared tags, this involves hooking us into
     1622             :  * the tag wakeups. For non-shared tags, we can simply mark ourselves as
     1623             :  * needing a restart. In both cases, take care to check the condition again after
    1624             :  * marking us as waiting.
    1625             :  */
    1626           0 : static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
    1627             :                                  struct request *rq)
    1628             : {
    1629           0 :         struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
    1630             :         struct wait_queue_head *wq;
    1631             :         wait_queue_entry_t *wait;
    1632             :         bool ret;
    1633             : 
    1634           0 :         if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
    1635           0 :                 blk_mq_sched_mark_restart_hctx(hctx);
    1636             : 
    1637             :                 /*
    1638             :                  * It's possible that a tag was freed in the window between the
    1639             :                  * allocation failure and adding the hardware queue to the wait
    1640             :                  * queue.
    1641             :                  *
    1642             :                  * Don't clear RESTART here, someone else could have set it.
    1643             :                  * At most this will cost an extra queue run.
    1644             :                  */
    1645           0 :                 return blk_mq_get_driver_tag(rq);
    1646             :         }
    1647             : 
    1648           0 :         wait = &hctx->dispatch_wait;
    1649           0 :         if (!list_empty_careful(&wait->entry))
    1650             :                 return false;
    1651             : 
    1652           0 :         wq = &bt_wait_ptr(sbq, hctx)->wait;
    1653             : 
    1654           0 :         spin_lock_irq(&wq->lock);
    1655           0 :         spin_lock(&hctx->dispatch_wait_lock);
    1656           0 :         if (!list_empty(&wait->entry)) {
    1657           0 :                 spin_unlock(&hctx->dispatch_wait_lock);
    1658           0 :                 spin_unlock_irq(&wq->lock);
    1659           0 :                 return false;
    1660             :         }
    1661             : 
    1662           0 :         atomic_inc(&sbq->ws_active);
    1663           0 :         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    1664           0 :         __add_wait_queue(wq, wait);
    1665             : 
    1666             :         /*
    1667             :          * It's possible that a tag was freed in the window between the
    1668             :          * allocation failure and adding the hardware queue to the wait
    1669             :          * queue.
    1670             :          */
    1671           0 :         ret = blk_mq_get_driver_tag(rq);
    1672           0 :         if (!ret) {
    1673           0 :                 spin_unlock(&hctx->dispatch_wait_lock);
    1674           0 :                 spin_unlock_irq(&wq->lock);
    1675           0 :                 return false;
    1676             :         }
    1677             : 
    1678             :         /*
    1679             :          * We got a tag, remove ourselves from the wait queue to ensure
    1680             :          * someone else gets the wakeup.
    1681             :          */
    1682           0 :         list_del_init(&wait->entry);
    1683           0 :         atomic_dec(&sbq->ws_active);
    1684           0 :         spin_unlock(&hctx->dispatch_wait_lock);
    1685           0 :         spin_unlock_irq(&wq->lock);
    1686             : 
    1687           0 :         return true;
    1688             : }
    1689             : 
    1690             : #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
    1691             : #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
    1692             : /*
     1693             :  * Update dispatch busy with an Exponentially Weighted Moving Average (EWMA):
     1694             :  * - EWMA is a simple way to compute a running average value
     1695             :  * - the weights (7/8 and 1/8) are chosen so that the value decays exponentially
     1696             :  * - a factor of 4 is used to avoid the result dropping to 0 too quickly; the
     1697             :  *   exact factor barely matters because the EWMA decays exponentially anyway
    1698             :  */
    1699             : static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
    1700             : {
    1701             :         unsigned int ewma;
    1702             : 
    1703           0 :         ewma = hctx->dispatch_busy;
    1704             : 
    1705           0 :         if (!ewma && !busy)
    1706             :                 return;
    1707             : 
    1708           0 :         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
    1709             :         if (busy)
    1710           0 :                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
    1711           0 :         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
    1712             : 
    1713           0 :         hctx->dispatch_busy = ewma;
    1714             : }
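
To make the integer behaviour concrete, here is a small userspace walk-through of the same update rule (weight 8, factor 4). The sequence of busy/idle dispatches is made up, but the arithmetic mirrors the function above: three busy updates push the value to 4, and a handful of idle updates decay it back to 0.

#include <stdio.h>

int main(void)
{
        unsigned int ewma = 0;
        int busy_seq[] = { 1, 1, 1, 0, 0, 0, 0, 0 };    /* made-up dispatch outcomes */

        for (int i = 0; i < 8; i++) {
                int busy = busy_seq[i];

                if (!ewma && !busy)
                        continue;
                ewma *= 8 - 1;          /* keep 7/8 of the old value          */
                if (busy)
                        ewma += 1 << 4; /* add 16 (factor 4) on a busy update */
                ewma /= 8;
                printf("busy=%d -> ewma=%u\n", busy, ewma);
        }
        return 0;       /* prints 2, 3, 4, 3, 2, 1, 0 */
}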
    1715             : 
    1716             : #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
    1717             : 
    1718           0 : static void blk_mq_handle_dev_resource(struct request *rq,
    1719             :                                        struct list_head *list)
    1720             : {
    1721           0 :         struct request *next =
    1722           0 :                 list_first_entry_or_null(list, struct request, queuelist);
    1723             : 
    1724             :         /*
    1725             :          * If an I/O scheduler has been configured and we got a driver tag for
    1726             :          * the next request already, free it.
    1727             :          */
    1728           0 :         if (next)
    1729             :                 blk_mq_put_driver_tag(next);
    1730             : 
    1731           0 :         list_add(&rq->queuelist, list);
    1732           0 :         __blk_mq_requeue_request(rq);
    1733           0 : }
    1734             : 
    1735             : static void blk_mq_handle_zone_resource(struct request *rq,
    1736             :                                         struct list_head *zone_list)
    1737             : {
    1738             :         /*
    1739             :          * If we end up here it is because we cannot dispatch a request to a
     1740             :          * specific zone due to LLD-level zone-write locking or other
     1741             :          * zone-related resources not being available. In this case, set the request
    1742             :          * aside in zone_list for retrying it later.
    1743             :          */
    1744           0 :         list_add(&rq->queuelist, zone_list);
    1745           0 :         __blk_mq_requeue_request(rq);
    1746             : }
    1747             : 
    1748             : enum prep_dispatch {
    1749             :         PREP_DISPATCH_OK,
    1750             :         PREP_DISPATCH_NO_TAG,
    1751             :         PREP_DISPATCH_NO_BUDGET,
    1752             : };
    1753             : 
    1754           0 : static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
    1755             :                                                   bool need_budget)
    1756             : {
    1757           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
    1758           0 :         int budget_token = -1;
    1759             : 
    1760           0 :         if (need_budget) {
    1761           0 :                 budget_token = blk_mq_get_dispatch_budget(rq->q);
    1762           0 :                 if (budget_token < 0) {
    1763             :                         blk_mq_put_driver_tag(rq);
    1764             :                         return PREP_DISPATCH_NO_BUDGET;
    1765             :                 }
    1766             :                 blk_mq_set_rq_budget_token(rq, budget_token);
    1767             :         }
    1768             : 
    1769           0 :         if (!blk_mq_get_driver_tag(rq)) {
    1770             :                 /*
    1771             :                  * The initial allocation attempt failed, so we need to
    1772             :                  * rerun the hardware queue when a tag is freed. The
    1773             :                  * waitqueue takes care of that. If the queue is run
    1774             :                  * before we add this entry back on the dispatch list,
    1775             :                  * we'll re-run it below.
    1776             :                  */
    1777           0 :                 if (!blk_mq_mark_tag_wait(hctx, rq)) {
    1778             :                         /*
     1779             :                          * Any budget not obtained from this function will be
     1780             :                          * released together when handling the partial dispatch.
    1781             :                          */
    1782           0 :                         if (need_budget)
    1783           0 :                                 blk_mq_put_dispatch_budget(rq->q, budget_token);
    1784             :                         return PREP_DISPATCH_NO_TAG;
    1785             :                 }
    1786             :         }
    1787             : 
    1788             :         return PREP_DISPATCH_OK;
    1789             : }
    1790             : 
    1791             : /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
    1792           0 : static void blk_mq_release_budgets(struct request_queue *q,
    1793             :                 struct list_head *list)
    1794             : {
    1795             :         struct request *rq;
    1796             : 
    1797           0 :         list_for_each_entry(rq, list, queuelist) {
    1798           0 :                 int budget_token = blk_mq_get_rq_budget_token(rq);
    1799             : 
    1800           0 :                 if (budget_token >= 0)
    1801             :                         blk_mq_put_dispatch_budget(q, budget_token);
    1802             :         }
    1803           0 : }
    1804             : 
    1805             : /*
    1806             :  * Returns true if we did some work AND can potentially do more.
    1807             :  */
    1808           0 : bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
    1809             :                              unsigned int nr_budgets)
    1810             : {
    1811             :         enum prep_dispatch prep;
    1812           0 :         struct request_queue *q = hctx->queue;
    1813             :         struct request *rq, *nxt;
    1814             :         int errors, queued;
    1815           0 :         blk_status_t ret = BLK_STS_OK;
    1816           0 :         LIST_HEAD(zone_list);
    1817           0 :         bool needs_resource = false;
    1818             : 
    1819           0 :         if (list_empty(list))
    1820             :                 return false;
    1821             : 
    1822             :         /*
    1823             :          * Now process all the entries, sending them to the driver.
    1824             :          */
    1825             :         errors = queued = 0;
    1826             :         do {
    1827             :                 struct blk_mq_queue_data bd;
    1828             : 
    1829           0 :                 rq = list_first_entry(list, struct request, queuelist);
    1830             : 
    1831           0 :                 WARN_ON_ONCE(hctx != rq->mq_hctx);
    1832           0 :                 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
    1833           0 :                 if (prep != PREP_DISPATCH_OK)
    1834             :                         break;
    1835             : 
    1836           0 :                 list_del_init(&rq->queuelist);
    1837             : 
    1838           0 :                 bd.rq = rq;
    1839             : 
    1840             :                 /*
    1841             :                  * Flag last if we have no more requests, or if we have more
    1842             :                  * but can't assign a driver tag to it.
    1843             :                  */
    1844           0 :                 if (list_empty(list))
    1845           0 :                         bd.last = true;
    1846             :                 else {
    1847           0 :                         nxt = list_first_entry(list, struct request, queuelist);
    1848           0 :                         bd.last = !blk_mq_get_driver_tag(nxt);
    1849             :                 }
    1850             : 
    1851             :                 /*
     1852             :                  * Once the request is queued to the LLD, there is no need to
     1853             :                  * cover the budget any more.
    1854             :                  */
    1855           0 :                 if (nr_budgets)
    1856           0 :                         nr_budgets--;
    1857           0 :                 ret = q->mq_ops->queue_rq(hctx, &bd);
    1858           0 :                 switch (ret) {
    1859             :                 case BLK_STS_OK:
    1860           0 :                         queued++;
    1861           0 :                         break;
    1862             :                 case BLK_STS_RESOURCE:
    1863           0 :                         needs_resource = true;
    1864             :                         fallthrough;
    1865             :                 case BLK_STS_DEV_RESOURCE:
    1866           0 :                         blk_mq_handle_dev_resource(rq, list);
    1867           0 :                         goto out;
    1868             :                 case BLK_STS_ZONE_RESOURCE:
    1869             :                         /*
    1870             :                          * Move the request to zone_list and keep going through
    1871             :                          * the dispatch list to find more requests the drive can
    1872             :                          * accept.
    1873             :                          */
    1874           0 :                         blk_mq_handle_zone_resource(rq, &zone_list);
    1875           0 :                         needs_resource = true;
    1876           0 :                         break;
    1877             :                 default:
    1878           0 :                         errors++;
    1879           0 :                         blk_mq_end_request(rq, ret);
    1880             :                 }
    1881           0 :         } while (!list_empty(list));
    1882             : out:
    1883           0 :         if (!list_empty(&zone_list))
    1884             :                 list_splice_tail_init(&zone_list, list);
    1885             : 
    1886             :         /* If we didn't flush the entire list, we could have told the driver
    1887             :          * there was more coming, but that turned out to be a lie.
    1888             :          */
    1889           0 :         if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
    1890           0 :                 q->mq_ops->commit_rqs(hctx);
    1891             :         /*
    1892             :          * Any items that need requeuing? Stuff them into hctx->dispatch,
    1893             :          * that is where we will continue on next queue run.
    1894             :          */
    1895           0 :         if (!list_empty(list)) {
    1896             :                 bool needs_restart;
    1897             :                 /* For non-shared tags, the RESTART check will suffice */
    1898           0 :                 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
    1899           0 :                         (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
    1900             : 
    1901           0 :                 if (nr_budgets)
    1902           0 :                         blk_mq_release_budgets(q, list);
    1903             : 
    1904           0 :                 spin_lock(&hctx->lock);
    1905           0 :                 list_splice_tail_init(list, &hctx->dispatch);
    1906           0 :                 spin_unlock(&hctx->lock);
    1907             : 
    1908             :                 /*
     1909             :                  * Order adding requests to hctx->dispatch against checking the
     1910             :                  * SCHED_RESTART flag. The pairing smp_mb() is the one in
     1911             :                  * blk_mq_sched_restart(). This prevents the restart code path
     1912             :                  * from missing the newly added requests on hctx->dispatch while
     1913             :                  * SCHED_RESTART is observed here.
    1914             :                  */
    1915           0 :                 smp_mb();
    1916             : 
    1917             :                 /*
    1918             :                  * If SCHED_RESTART was set by the caller of this function and
    1919             :                  * it is no longer set that means that it was cleared by another
    1920             :                  * thread and hence that a queue rerun is needed.
    1921             :                  *
    1922             :                  * If 'no_tag' is set, that means that we failed getting
    1923             :                  * a driver tag with an I/O scheduler attached. If our dispatch
    1924             :                  * waitqueue is no longer active, ensure that we run the queue
    1925             :                  * AFTER adding our entries back to the list.
    1926             :                  *
    1927             :                  * If no I/O scheduler has been configured it is possible that
    1928             :                  * the hardware queue got stopped and restarted before requests
    1929             :                  * were pushed back onto the dispatch list. Rerun the queue to
    1930             :                  * avoid starvation. Notes:
    1931             :                  * - blk_mq_run_hw_queue() checks whether or not a queue has
    1932             :                  *   been stopped before rerunning a queue.
    1933             :                  * - Some but not all block drivers stop a queue before
    1934             :                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
    1935             :                  *   and dm-rq.
    1936             :                  *
    1937             :                  * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
    1938             :                  * bit is set, run queue after a delay to avoid IO stalls
    1939             :                  * that could otherwise occur if the queue is idle.  We'll do
    1940             :                  * similar if we couldn't get budget or couldn't lock a zone
    1941             :                  * and SCHED_RESTART is set.
    1942             :                  */
    1943           0 :                 needs_restart = blk_mq_sched_needs_restart(hctx);
    1944           0 :                 if (prep == PREP_DISPATCH_NO_BUDGET)
    1945           0 :                         needs_resource = true;
    1946           0 :                 if (!needs_restart ||
    1947           0 :                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
    1948           0 :                         blk_mq_run_hw_queue(hctx, true);
    1949           0 :                 else if (needs_restart && needs_resource)
    1950             :                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
    1951             : 
    1952           0 :                 blk_mq_update_dispatch_busy(hctx, true);
    1953           0 :                 return false;
    1954             :         } else
    1955           0 :                 blk_mq_update_dispatch_busy(hctx, false);
    1956             : 
    1957           0 :         return (queued + errors) != 0;
    1958             : }
    1959             : 
    1960             : /**
    1961             :  * __blk_mq_run_hw_queue - Run a hardware queue.
    1962             :  * @hctx: Pointer to the hardware queue to run.
    1963             :  *
    1964             :  * Send pending requests to the hardware.
    1965             :  */
    1966           0 : static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
    1967             : {
    1968             :         /*
    1969             :          * We can't run the queue inline with ints disabled. Ensure that
    1970             :          * we catch bad users of this early.
    1971             :          */
    1972           0 :         WARN_ON_ONCE(in_interrupt());
    1973             : 
    1974           0 :         blk_mq_run_dispatch_ops(hctx->queue,
    1975             :                         blk_mq_sched_dispatch_requests(hctx));
    1976           0 : }
    1977             : 
    1978             : static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
    1979             : {
    1980           0 :         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
    1981             : 
    1982             :         if (cpu >= nr_cpu_ids)
    1983             :                 cpu = cpumask_first(hctx->cpumask);
    1984             :         return cpu;
    1985             : }
    1986             : 
    1987             : /*
    1988             :  * It'd be great if the workqueue API had a way to pass
    1989             :  * in a mask and had some smarts for more clever placement.
    1990             :  * For now we just round-robin here, switching for every
    1991             :  * BLK_MQ_CPU_WORK_BATCH queued items.
    1992             :  */
    1993             : static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
    1994             : {
    1995           0 :         bool tried = false;
    1996           0 :         int next_cpu = hctx->next_cpu;
    1997             : 
    1998           0 :         if (hctx->queue->nr_hw_queues == 1)
    1999             :                 return WORK_CPU_UNBOUND;
    2000             : 
    2001           0 :         if (--hctx->next_cpu_batch <= 0) {
    2002             : select_cpu:
    2003           0 :                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
    2004             :                                 cpu_online_mask);
    2005           0 :                 if (next_cpu >= nr_cpu_ids)
    2006           0 :                         next_cpu = blk_mq_first_mapped_cpu(hctx);
    2007           0 :                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
    2008             :         }
    2009             : 
    2010             :         /*
     2011             :          * Do an unbound schedule if we can't find an online CPU for this hctx;
     2012             :          * this should only happen while handling a CPU DEAD event.
    2013             :          */
    2014           0 :         if (!cpu_online(next_cpu)) {
    2015           0 :                 if (!tried) {
    2016             :                         tried = true;
    2017             :                         goto select_cpu;
    2018             :                 }
    2019             : 
    2020             :                 /*
     2021             :                  * Make sure to re-select the CPU next time once CPUs in
     2022             :                  * hctx->cpumask come online again.
    2023             :                  */
    2024           0 :                 hctx->next_cpu = next_cpu;
    2025           0 :                 hctx->next_cpu_batch = 1;
    2026             :                 return WORK_CPU_UNBOUND;
    2027             :         }
    2028             : 
    2029           0 :         hctx->next_cpu = next_cpu;
    2030             :         return next_cpu;
    2031             : }
    2032             : 
    2033             : /**
    2034             :  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
    2035             :  * @hctx: Pointer to the hardware queue to run.
    2036             :  * @async: If we want to run the queue asynchronously.
    2037             :  * @msecs: Milliseconds of delay to wait before running the queue.
    2038             :  *
    2039             :  * If !@async, try to run the queue now. Else, run the queue asynchronously and
    2040             :  * with a delay of @msecs.
    2041             :  */
    2042           0 : static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
    2043             :                                         unsigned long msecs)
    2044             : {
    2045           0 :         if (unlikely(blk_mq_hctx_stopped(hctx)))
    2046             :                 return;
    2047             : 
    2048           0 :         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
    2049           0 :                 int cpu = get_cpu();
    2050           0 :                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
    2051           0 :                         __blk_mq_run_hw_queue(hctx);
    2052           0 :                         put_cpu();
    2053           0 :                         return;
    2054             :                 }
    2055             : 
    2056           0 :                 put_cpu();
    2057             :         }
    2058             : 
    2059           0 :         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
    2060             :                                     msecs_to_jiffies(msecs));
    2061             : }
    2062             : 
    2063             : /**
    2064             :  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
    2065             :  * @hctx: Pointer to the hardware queue to run.
    2066             :  * @msecs: Milliseconds of delay to wait before running the queue.
    2067             :  *
    2068             :  * Run a hardware queue asynchronously with a delay of @msecs.
    2069             :  */
    2070           0 : void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
    2071             : {
    2072           0 :         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
    2073           0 : }
    2074             : EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
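                     : 
                     : /*
                     :  * Illustrative sketch, not part of this file: a hypothetical driver that
                     :  * temporarily cannot make progress may ask blk-mq to retry this hardware
                     :  * queue a little later instead of polling. mydrv_can_queue() and the
                     :  * 3 msec delay are made-up names for the example.
                     :  *
                     :  *	if (!mydrv_can_queue(hctx->driver_data))
                     :  *		blk_mq_delay_run_hw_queue(hctx, 3);
                     :  */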
    2075             : 
    2076             : /**
    2077             :  * blk_mq_run_hw_queue - Start to run a hardware queue.
    2078             :  * @hctx: Pointer to the hardware queue to run.
    2079             :  * @async: If we want to run the queue asynchronously.
    2080             :  *
    2081             :  * Check if the request queue is not in a quiesced state and if there are
    2082             :  * pending requests to be sent. If this is true, run the queue to send requests
    2083             :  * to hardware.
    2084             :  */
    2085           0 : void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
    2086             : {
    2087             :         bool need_run;
    2088             : 
    2089             :         /*
    2090             :          * When the queue is quiesced, we may be switching the io scheduler,
    2091             :          * updating nr_hw_queues, or doing other work, so the queue can't be
    2092             :          * run; not even __blk_mq_hctx_has_pending() can be called safely.
    2093             :          *
    2094             :          * The queue will be rerun by blk_mq_unquiesce_queue() if it is
    2095             :          * quiesced.
    2096             :          */
    2097           0 :         __blk_mq_run_dispatch_ops(hctx->queue, false,
    2098             :                 need_run = !blk_queue_quiesced(hctx->queue) &&
    2099             :                 blk_mq_hctx_has_pending(hctx));
    2100             : 
    2101           0 :         if (need_run)
    2102           0 :                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
    2103           0 : }
    2104             : EXPORT_SYMBOL(blk_mq_run_hw_queue);
    2105             : 
    2106             : /*
    2107             :  * Is the request queue handled by an IO scheduler that does not respect
    2108             :  * hardware queues when dispatching?
    2109             :  */
    2110             : static bool blk_mq_has_sqsched(struct request_queue *q)
    2111             : {
    2112           0 :         struct elevator_queue *e = q->elevator;
    2113             : 
    2114           0 :         if (e && e->type->ops.dispatch_request &&
    2115           0 :             !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
    2116             :                 return true;
    2117             :         return false;
    2118             : }
    2119             : 
    2120             : /*
    2121             :  * Return the preferred queue to dispatch from (if any) for a non-mq-aware
    2122             :  * IO scheduler.
    2123             :  */
    2124           0 : static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
    2125             : {
    2126             :         struct blk_mq_hw_ctx *hctx;
    2127             : 
    2128             :         /*
    2129             :          * If the IO scheduler does not respect hardware queues when
    2130             :          * dispatching, we just don't bother with multiple HW queues and
    2131             :          * dispatch from hctx for the current CPU since running multiple queues
    2132             :          * just causes lock contention inside the scheduler and pointless cache
    2133             :          * bouncing.
    2134             :          */
    2135           0 :         hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
    2136             :                                      raw_smp_processor_id());
    2137           0 :         if (!blk_mq_hctx_stopped(hctx))
    2138             :                 return hctx;
    2139           0 :         return NULL;
    2140             : }
    2141             : 
    2142             : /**
    2143             :  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
    2144             :  * @q: Pointer to the request queue to run.
    2145             :  * @async: If we want to run the queue asynchronously.
    2146             :  */
    2147           0 : void blk_mq_run_hw_queues(struct request_queue *q, bool async)
    2148             : {
    2149             :         struct blk_mq_hw_ctx *hctx, *sq_hctx;
    2150             :         unsigned long i;
    2151             : 
    2152           0 :         sq_hctx = NULL;
    2153           0 :         if (blk_mq_has_sqsched(q))
    2154           0 :                 sq_hctx = blk_mq_get_sq_hctx(q);
    2155           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    2156           0 :                 if (blk_mq_hctx_stopped(hctx))
    2157           0 :                         continue;
    2158             :                 /*
    2159             :                  * Dispatch from this hctx either if there's no hctx preferred
    2160             :                  * by the IO scheduler or if it has requests that bypass the
    2161             :                  * scheduler.
    2162             :                  */
    2163           0 :                 if (!sq_hctx || sq_hctx == hctx ||
    2164           0 :                     !list_empty_careful(&hctx->dispatch))
    2165           0 :                         blk_mq_run_hw_queue(hctx, async);
    2166             :         }
    2167           0 : }
    2168             : EXPORT_SYMBOL(blk_mq_run_hw_queues);
    2169             : 
    2170             : /**
    2171             :  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
    2172             :  * @q: Pointer to the request queue to run.
    2173             :  * @msecs: Milliseconds of delay to wait before running the queues.
    2174             :  */
    2175           0 : void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
    2176             : {
    2177             :         struct blk_mq_hw_ctx *hctx, *sq_hctx;
    2178             :         unsigned long i;
    2179             : 
    2180           0 :         sq_hctx = NULL;
    2181           0 :         if (blk_mq_has_sqsched(q))
    2182           0 :                 sq_hctx = blk_mq_get_sq_hctx(q);
    2183           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    2184           0 :                 if (blk_mq_hctx_stopped(hctx))
    2185           0 :                         continue;
    2186             :                 /*
    2187             :                  * If there is already a run_work pending, leave the
    2188             :                  * pending delay untouched. Otherwise, a hctx can stall
    2189             :                  * if another hctx is re-delaying the other's work
    2190             :                  * before the work executes.
    2191             :                  */
    2192           0 :                 if (delayed_work_pending(&hctx->run_work))
    2193           0 :                         continue;
    2194             :                 /*
    2195             :                  * Dispatch from this hctx either if there's no hctx preferred
    2196             :                  * by the IO scheduler or if it has requests that bypass the
    2197             :                  * scheduler.
    2198             :                  */
    2199           0 :                 if (!sq_hctx || sq_hctx == hctx ||
    2200           0 :                     !list_empty_careful(&hctx->dispatch))
    2201             :                         blk_mq_delay_run_hw_queue(hctx, msecs);
    2202             :         }
    2203           0 : }
    2204             : EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
    2205             : 
    2206             : /**
    2207             :  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
    2208             :  * @q: request queue.
    2209             :  *
    2210             :  * The caller is responsible for serializing this function against
    2211             :  * blk_mq_{start,stop}_hw_queue().
    2212             :  */
    2213           0 : bool blk_mq_queue_stopped(struct request_queue *q)
    2214             : {
    2215             :         struct blk_mq_hw_ctx *hctx;
    2216             :         unsigned long i;
    2217             : 
    2218           0 :         queue_for_each_hw_ctx(q, hctx, i)
    2219           0 :                 if (blk_mq_hctx_stopped(hctx))
    2220             :                         return true;
    2221             : 
    2222             :         return false;
    2223             : }
    2224             : EXPORT_SYMBOL(blk_mq_queue_stopped);
    2225             : 
    2226             : /*
    2227             :  * This function is often used by a driver to pause .queue_rq() when there
    2228             :  * aren't enough resources or some condition isn't satisfied, in which case
    2229             :  * BLK_STS_RESOURCE is usually returned.
    2230             :  *
    2231             :  * We do not guarantee that dispatch can be drained or blocked
    2232             :  * after blk_mq_stop_hw_queue() returns. Please use
    2233             :  * blk_mq_quiesce_queue() for that requirement.
    2234             :  */
    2235           0 : void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
    2236             : {
    2237           0 :         cancel_delayed_work(&hctx->run_work);
    2238             : 
    2239           0 :         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
    2240           0 : }
    2241             : EXPORT_SYMBOL(blk_mq_stop_hw_queue);
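                     : 
                     : /*
                     :  * Illustrative sketch, not part of this file: a hypothetical driver's
                     :  * ->queue_rq() pausing dispatch when its device runs out of slots, then
                     :  * restarting the stopped queues from its completion path. mydrv_has_slots(),
                     :  * mydrv_issue() and the surrounding driver are made-up names.
                     :  *
                     :  *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                     :  *					   const struct blk_mq_queue_data *bd)
                     :  *	{
                     :  *		if (!mydrv_has_slots(hctx->driver_data)) {
                     :  *			blk_mq_stop_hw_queue(hctx);
                     :  *			return BLK_STS_DEV_RESOURCE;
                     :  *		}
                     :  *		blk_mq_start_request(bd->rq);
                     :  *		mydrv_issue(bd->rq);
                     :  *		return BLK_STS_OK;
                     :  *	}
                     :  *
                     :  * Once slots become free again (e.g. in the completion handler):
                     :  *
                     :  *	blk_mq_start_stopped_hw_queues(q, true);
                     :  */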
    2242             : 
    2243             : /*
    2244             :  * This function is often used by a driver to pause .queue_rq() when there
    2245             :  * aren't enough resources or some condition isn't satisfied, in which case
    2246             :  * BLK_STS_RESOURCE is usually returned.
    2247             :  *
    2248             :  * We do not guarantee that dispatch can be drained or blocked
    2249             :  * after blk_mq_stop_hw_queues() returns. Please use
    2250             :  * blk_mq_quiesce_queue() for that requirement.
    2251             :  */
    2252           0 : void blk_mq_stop_hw_queues(struct request_queue *q)
    2253             : {
    2254             :         struct blk_mq_hw_ctx *hctx;
    2255             :         unsigned long i;
    2256             : 
    2257           0 :         queue_for_each_hw_ctx(q, hctx, i)
    2258           0 :                 blk_mq_stop_hw_queue(hctx);
    2259           0 : }
    2260             : EXPORT_SYMBOL(blk_mq_stop_hw_queues);
    2261             : 
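                     : /*
                     :  * Clear the stopped state set by blk_mq_stop_hw_queue() and run the
                     :  * hardware queue so that any pending requests get dispatched again.
                     :  */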
    2262           0 : void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
    2263             : {
    2264           0 :         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
    2265             : 
    2266           0 :         blk_mq_run_hw_queue(hctx, false);
    2267           0 : }
    2268             : EXPORT_SYMBOL(blk_mq_start_hw_queue);
    2269             : 
    2270           0 : void blk_mq_start_hw_queues(struct request_queue *q)
    2271             : {
    2272             :         struct blk_mq_hw_ctx *hctx;
    2273             :         unsigned long i;
    2274             : 
    2275           0 :         queue_for_each_hw_ctx(q, hctx, i)
    2276           0 :                 blk_mq_start_hw_queue(hctx);
    2277           0 : }
    2278             : EXPORT_SYMBOL(blk_mq_start_hw_queues);
    2279             : 
    2280           0 : void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
    2281             : {
    2282           0 :         if (!blk_mq_hctx_stopped(hctx))
    2283             :                 return;
    2284             : 
    2285           0 :         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
    2286           0 :         blk_mq_run_hw_queue(hctx, async);
    2287             : }
    2288             : EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
    2289             : 
    2290           0 : void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
    2291             : {
    2292             :         struct blk_mq_hw_ctx *hctx;
    2293             :         unsigned long i;
    2294             : 
    2295           0 :         queue_for_each_hw_ctx(q, hctx, i)
    2296           0 :                 blk_mq_start_stopped_hw_queue(hctx, async);
    2297           0 : }
    2298             : EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
    2299             : 
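                     : /*
                     :  * Work callback for hctx->run_work, scheduled on a kblockd workqueue by
                     :  * __blk_mq_delay_run_hw_queue(). It runs the hardware queue unless the
                     :  * queue has been stopped in the meantime.
                     :  */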
    2300           0 : static void blk_mq_run_work_fn(struct work_struct *work)
    2301             : {
    2302             :         struct blk_mq_hw_ctx *hctx;
    2303             : 
    2304           0 :         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
    2305             : 
    2306             :         /*
    2307             :          * If we are stopped, don't run the queue.
    2308             :          */
    2309           0 :         if (blk_mq_hctx_stopped(hctx))
    2310             :                 return;
    2311             : 
    2312           0 :         __blk_mq_run_hw_queue(hctx);
    2313             : }
    2314             : 
    2315             : static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
    2316             :                                             struct request *rq,
    2317             :                                             bool at_head)
    2318             : {
    2319           0 :         struct blk_mq_ctx *ctx = rq->mq_ctx;
    2320           0 :         enum hctx_type type = hctx->type;
    2321             : 
    2322             :         lockdep_assert_held(&ctx->lock);
    2323             : 
    2324           0 :         trace_block_rq_insert(rq);
    2325             : 
    2326           0 :         if (at_head)
    2327           0 :                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
    2328             :         else
    2329           0 :                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
    2330             : }
    2331             : 
    2332           0 : void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
    2333             :                              bool at_head)
    2334             : {
    2335           0 :         struct blk_mq_ctx *ctx = rq->mq_ctx;
    2336             : 
    2337             :         lockdep_assert_held(&ctx->lock);
    2338             : 
    2339           0 :         __blk_mq_insert_req_list(hctx, rq, at_head);
    2340           0 :         blk_mq_hctx_mark_pending(hctx, ctx);
    2341           0 : }
    2342             : 
    2343             : /**
    2344             :  * blk_mq_request_bypass_insert - Insert a request into the dispatch list.
    2345             :  * @rq: Pointer to request to be inserted.
    2346             :  * @at_head: true if the request should be inserted at the head of the list.
    2347             :  * @run_queue: If we should run the hardware queue after inserting the request.
    2348             :  *
    2349             :  * Should only be used carefully, when the caller knows we want to
    2350             :  * bypass a potential IO scheduler on the target device.
    2351             :  */
    2352           0 : void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
    2353             :                                   bool run_queue)
    2354             : {
    2355           0 :         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
    2356             : 
    2357           0 :         spin_lock(&hctx->lock);
    2358           0 :         if (at_head)
    2359           0 :                 list_add(&rq->queuelist, &hctx->dispatch);
    2360             :         else
    2361           0 :                 list_add_tail(&rq->queuelist, &hctx->dispatch);
    2362           0 :         spin_unlock(&hctx->lock);
    2363             : 
    2364           0 :         if (run_queue)
    2365           0 :                 blk_mq_run_hw_queue(hctx, false);
    2366           0 : }
    2367             : 
    2368           0 : void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
    2369             :                             struct list_head *list)
    2370             : 
    2371             : {
    2372             :         struct request *rq;
    2373           0 :         enum hctx_type type = hctx->type;
    2374             : 
    2375             :         /*
    2376             :          * Preemption doesn't flush the plug list, so it's possible that
    2377             :          * ctx->cpu is offline by now.
    2378             :          */
    2379           0 :         list_for_each_entry(rq, list, queuelist) {
    2380           0 :                 BUG_ON(rq->mq_ctx != ctx);
    2381           0 :                 trace_block_rq_insert(rq);
    2382             :         }
    2383             : 
    2384           0 :         spin_lock(&ctx->lock);
    2385           0 :         list_splice_tail_init(list, &ctx->rq_lists[type]);
    2386           0 :         blk_mq_hctx_mark_pending(hctx, ctx);
    2387           0 :         spin_unlock(&ctx->lock);
    2388           0 : }
    2389             : 
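                     : /*
                     :  * Tell the driver that no more requests follow in this batch via the
                     :  * optional ->commit_rqs() hook, and reset the caller's queued counter.
                     :  */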
    2390             : static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
    2391             :                               bool from_schedule)
    2392             : {
    2393           0 :         if (hctx->queue->mq_ops->commit_rqs) {
    2394           0 :                 trace_block_unplug(hctx->queue, *queued, !from_schedule);
    2395           0 :                 hctx->queue->mq_ops->commit_rqs(hctx);
    2396             :         }
    2397             :         *queued = 0;
    2398             : }
    2399             : 
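                     : /*
                     :  * Initialize a freshly allocated request from @bio: copy the start sector
                     :  * and data, attach the bio's encryption context and start I/O accounting.
                     :  */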
    2400           0 : static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
    2401             :                 unsigned int nr_segs)
    2402             : {
    2403             :         int err;
    2404             : 
    2405           0 :         if (bio->bi_opf & REQ_RAHEAD)
    2406           0 :                 rq->cmd_flags |= REQ_FAILFAST_MASK;
    2407             : 
    2408           0 :         rq->__sector = bio->bi_iter.bi_sector;
    2409           0 :         blk_rq_bio_prep(rq, bio, nr_segs);
    2410             : 
    2411             :         /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
    2412           0 :         err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
    2413           0 :         WARN_ON_ONCE(err);
    2414             : 
    2415           0 :         blk_account_io_start(rq);
    2416           0 : }
    2417             : 
    2418           0 : static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
    2419             :                                             struct request *rq, bool last)
    2420             : {
    2421           0 :         struct request_queue *q = rq->q;
    2422           0 :         struct blk_mq_queue_data bd = {
    2423             :                 .rq = rq,
    2424             :                 .last = last,
    2425             :         };
    2426             :         blk_status_t ret;
    2427             : 
    2428             :         /*
    2429             :          * If queueing succeeds, we are done. On error, the caller may kill
    2430             :          * the request. For any busy status, just requeue it and let the
    2431             :          * caller add it back to its list, as we previously would have done.
    2432             :          */
    2433           0 :         ret = q->mq_ops->queue_rq(hctx, &bd);
    2434           0 :         switch (ret) {
    2435             :         case BLK_STS_OK:
    2436             :                 blk_mq_update_dispatch_busy(hctx, false);
    2437             :                 break;
    2438             :         case BLK_STS_RESOURCE:
    2439             :         case BLK_STS_DEV_RESOURCE:
    2440           0 :                 blk_mq_update_dispatch_busy(hctx, true);
    2441           0 :                 __blk_mq_requeue_request(rq);
    2442           0 :                 break;
    2443             :         default:
    2444             :                 blk_mq_update_dispatch_busy(hctx, false);
    2445             :                 break;
    2446             :         }
    2447             : 
    2448           0 :         return ret;
    2449             : }
    2450             : 
    2451           0 : static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
    2452             :                                                 struct request *rq,
    2453             :                                                 bool bypass_insert, bool last)
    2454             : {
    2455           0 :         struct request_queue *q = rq->q;
    2456           0 :         bool run_queue = true;
    2457             :         int budget_token;
    2458             : 
    2459             :         /*
    2460             :          * RCU or SRCU read lock is needed before checking quiesced flag.
    2461             :          *
    2462             :          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
    2463             :          * blk_mq_request_issue_directly() and return BLK_STS_OK to the
    2464             :          * caller, so the driver won't try to dispatch again.
    2465             :          */
    2466           0 :         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
    2467             :                 run_queue = false;
    2468             :                 bypass_insert = false;
    2469             :                 goto insert;
    2470             :         }
    2471             : 
    2472           0 :         if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
    2473             :                 goto insert;
    2474             : 
    2475           0 :         budget_token = blk_mq_get_dispatch_budget(q);
    2476           0 :         if (budget_token < 0)
    2477             :                 goto insert;
    2478             : 
    2479           0 :         blk_mq_set_rq_budget_token(rq, budget_token);
    2480             : 
    2481           0 :         if (!blk_mq_get_driver_tag(rq)) {
    2482             :                 blk_mq_put_dispatch_budget(q, budget_token);
    2483             :                 goto insert;
    2484             :         }
    2485             : 
    2486           0 :         return __blk_mq_issue_directly(hctx, rq, last);
    2487             : insert:
    2488           0 :         if (bypass_insert)
    2489             :                 return BLK_STS_RESOURCE;
    2490             : 
    2491           0 :         blk_mq_sched_insert_request(rq, false, run_queue, false);
    2492             : 
    2493           0 :         return BLK_STS_OK;
    2494             : }
    2495             : 
    2496             : /**
    2497             :  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
    2498             :  * @hctx: Pointer of the associated hardware queue.
    2499             :  * @rq: Pointer to request to be sent.
    2500             :  *
    2501             :  * If the device has enough resources to accept a new request now, send the
    2502             :  * request directly to the device driver. Else, insert it into the
    2503             :  * hctx->dispatch list so we can try to send it again in the future. Requests
    2504             :  * inserted into this list have higher priority.
    2505             :  */
    2506           0 : static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
    2507             :                 struct request *rq)
    2508             : {
    2509           0 :         blk_status_t ret =
    2510             :                 __blk_mq_try_issue_directly(hctx, rq, false, true);
    2511             : 
    2512           0 :         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
    2513           0 :                 blk_mq_request_bypass_insert(rq, false, true);
    2514           0 :         else if (ret != BLK_STS_OK)
    2515           0 :                 blk_mq_end_request(rq, ret);
    2516           0 : }
    2517             : 
    2518             : static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
    2519             : {
    2520           0 :         return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
    2521             : }
    2522             : 
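                     : /*
                     :  * Issue the plugged requests directly to the driver, one hardware queue
                     :  * at a time, committing each batch via blk_mq_commit_rqs() whenever the
                     :  * hctx changes. Requests the driver cannot take right now are parked on
                     :  * the hctx dispatch list instead.
                     :  */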
    2523           0 : static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
    2524             : {
    2525           0 :         struct blk_mq_hw_ctx *hctx = NULL;
    2526             :         struct request *rq;
    2527           0 :         int queued = 0;
    2528           0 :         int errors = 0;
    2529             : 
    2530           0 :         while ((rq = rq_list_pop(&plug->mq_list))) {
    2531           0 :                 bool last = rq_list_empty(plug->mq_list);
    2532             :                 blk_status_t ret;
    2533             : 
    2534           0 :                 if (hctx != rq->mq_hctx) {
    2535           0 :                         if (hctx)
    2536           0 :                                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
    2537           0 :                         hctx = rq->mq_hctx;
    2538             :                 }
    2539             : 
    2540           0 :                 ret = blk_mq_request_issue_directly(rq, last);
    2541           0 :                 switch (ret) {
    2542             :                 case BLK_STS_OK:
    2543             :                         queued++;
    2544             :                         break;
    2545             :                 case BLK_STS_RESOURCE:
    2546             :                 case BLK_STS_DEV_RESOURCE:
    2547           0 :                         blk_mq_request_bypass_insert(rq, false, last);
    2548           0 :                         blk_mq_commit_rqs(hctx, &queued, from_schedule);
    2549             :                         return;
    2550             :                 default:
    2551           0 :                         blk_mq_end_request(rq, ret);
    2552           0 :                         errors++;
    2553           0 :                         break;
    2554             :                 }
    2555             :         }
    2556             : 
    2557             :         /*
    2558             :          * If we didn't flush the entire list, we could have told the driver
    2559             :          * there was more coming, but that turned out to be a lie.
    2560             :          */
    2561           0 :         if (errors)
    2562           0 :                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
    2563             : }
    2564             : 
    2565             : static void __blk_mq_flush_plug_list(struct request_queue *q,
    2566             :                                      struct blk_plug *plug)
    2567             : {
    2568           0 :         if (blk_queue_quiesced(q))
    2569             :                 return;
    2570           0 :         q->mq_ops->queue_rqs(&plug->mq_list);
    2571             : }
    2572             : 
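                     : /*
                     :  * Move all plugged requests that share the hctx and software context of
                     :  * the first request onto a local list and insert them through the I/O
                     :  * scheduler. The remaining requests are put back on the plug list for
                     :  * the next pass of blk_mq_flush_plug_list().
                     :  */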
    2573           0 : static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
    2574             : {
    2575           0 :         struct blk_mq_hw_ctx *this_hctx = NULL;
    2576           0 :         struct blk_mq_ctx *this_ctx = NULL;
    2577           0 :         struct request *requeue_list = NULL;
    2578           0 :         unsigned int depth = 0;
    2579           0 :         LIST_HEAD(list);
    2580             : 
    2581             :         do {
    2582           0 :                 struct request *rq = rq_list_pop(&plug->mq_list);
    2583             : 
    2584           0 :                 if (!this_hctx) {
    2585           0 :                         this_hctx = rq->mq_hctx;
    2586           0 :                         this_ctx = rq->mq_ctx;
    2587           0 :                 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
    2588           0 :                         rq_list_add(&requeue_list, rq);
    2589           0 :                         continue;
    2590             :                 }
    2591           0 :                 list_add_tail(&rq->queuelist, &list);
    2592           0 :                 depth++;
    2593           0 :         } while (!rq_list_empty(plug->mq_list));
    2594             : 
    2595           0 :         plug->mq_list = requeue_list;
    2596           0 :         trace_block_unplug(this_hctx->queue, depth, !from_sched);
    2597           0 :         blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
    2598           0 : }
    2599             : 
    2600           0 : void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
    2601             : {
    2602             :         struct request *rq;
    2603             : 
    2604           0 :         if (rq_list_empty(plug->mq_list))
    2605             :                 return;
    2606           0 :         plug->rq_count = 0;
    2607             : 
    2608           0 :         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
    2609             :                 struct request_queue *q;
    2610             : 
    2611           0 :                 rq = rq_list_peek(&plug->mq_list);
    2612           0 :                 q = rq->q;
    2613             : 
    2614             :                 /*
    2615             :                  * Peek first request and see if we have a ->queue_rqs() hook.
    2616             :                  * If we do, we can dispatch the whole plug list in one go. We
    2617             :                  * already know at this point that all requests belong to the
    2618             :                  * same queue; the caller must ensure that's the case.
    2619             :                  *
    2620             :                  * Since we pass off the full list to the driver at this point,
    2621             :                  * we do not increment the active request count for the queue.
    2622             :                  * Bypass shared tags for now because of that.
    2623             :                  */
    2624           0 :                 if (q->mq_ops->queue_rqs &&
    2625           0 :                     !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
    2626           0 :                         blk_mq_run_dispatch_ops(q,
    2627             :                                 __blk_mq_flush_plug_list(q, plug));
    2628           0 :                         if (rq_list_empty(plug->mq_list))
    2629             :                                 return;
    2630             :                 }
    2631             : 
    2632           0 :                 blk_mq_run_dispatch_ops(q,
    2633             :                                 blk_mq_plug_issue_direct(plug, false));
    2634           0 :                 if (rq_list_empty(plug->mq_list))
    2635             :                         return;
    2636             :         }
    2637             : 
    2638             :         do {
    2639           0 :                 blk_mq_dispatch_plug_list(plug, from_schedule);
    2640           0 :         } while (!rq_list_empty(plug->mq_list));
    2641             : }
    2642             : 
    2643           0 : void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
    2644             :                 struct list_head *list)
    2645             : {
    2646           0 :         int queued = 0;
    2647           0 :         int errors = 0;
    2648             : 
    2649           0 :         while (!list_empty(list)) {
    2650             :                 blk_status_t ret;
    2651           0 :                 struct request *rq = list_first_entry(list, struct request,
    2652             :                                 queuelist);
    2653             : 
    2654           0 :                 list_del_init(&rq->queuelist);
    2655           0 :                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
    2656           0 :                 if (ret != BLK_STS_OK) {
    2657           0 :                         if (ret == BLK_STS_RESOURCE ||
    2658           0 :                                         ret == BLK_STS_DEV_RESOURCE) {
    2659           0 :                                 blk_mq_request_bypass_insert(rq, false,
    2660           0 :                                                         list_empty(list));
    2661           0 :                                 break;
    2662             :                         }
    2663           0 :                         blk_mq_end_request(rq, ret);
    2664           0 :                         errors++;
    2665             :                 } else
    2666           0 :                         queued++;
    2667             :         }
    2668             : 
    2669             :         /*
    2670             :          * If we didn't flush the entire list, we could have told
    2671             :          * the driver there was more coming, but that turned out to
    2672             :          * be a lie.
    2673             :          */
    2674           0 :         if ((!list_empty(list) || errors) &&
    2675           0 :              hctx->queue->mq_ops->commit_rqs && queued)
    2676           0 :                 hctx->queue->mq_ops->commit_rqs(hctx);
    2677           0 : }
    2678             : 
    2679             : /*
    2680             :  * Allow 2x BLK_MAX_REQUEST_COUNT requests on the plug list when multiple
    2681             :  * queues are involved. This is important for md arrays to benefit from
    2682             :  * merging requests.
    2683             :  */
    2684             : static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
    2685             : {
    2686           0 :         if (plug->multiple_queues)
    2687             :                 return BLK_MAX_REQUEST_COUNT * 2;
    2688             :         return BLK_MAX_REQUEST_COUNT;
    2689             : }
    2690             : 
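                     : /*
                     :  * Add a request to the current plug, flushing the plug first if it is
                     :  * already full or if the previously queued request has reached
                     :  * BLK_PLUG_FLUSH_SIZE. Also keeps the plug's multiple_queues and
                     :  * has_elevator hints up to date.
                     :  */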
    2691           0 : static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
    2692             : {
    2693           0 :         struct request *last = rq_list_peek(&plug->mq_list);
    2694             : 
    2695           0 :         if (!plug->rq_count) {
    2696             :                 trace_block_plug(rq->q);
    2697           0 :         } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
    2698           0 :                    (!blk_queue_nomerges(rq->q) &&
    2699           0 :                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
    2700           0 :                 blk_mq_flush_plug_list(plug, false);
    2701           0 :                 trace_block_plug(rq->q);
    2702             :         }
    2703             : 
    2704           0 :         if (!plug->multiple_queues && last && last->q != rq->q)
    2705           0 :                 plug->multiple_queues = true;
    2706           0 :         if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
    2707           0 :                 plug->has_elevator = true;
    2708           0 :         rq->rq_next = NULL;
    2709           0 :         rq_list_add(&plug->mq_list, rq);
    2710           0 :         plug->rq_count++;
    2711           0 : }
    2712             : 
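                     : /*
                     :  * Try to merge @bio into an already plugged or scheduled request. Returns
                     :  * true if the bio was merged and no new request is needed for it.
                     :  */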
    2713           0 : static bool blk_mq_attempt_bio_merge(struct request_queue *q,
    2714             :                                      struct bio *bio, unsigned int nr_segs)
    2715             : {
    2716           0 :         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
    2717           0 :                 if (blk_attempt_plug_merge(q, bio, nr_segs))
    2718             :                         return true;
    2719           0 :                 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
    2720             :                         return true;
    2721             :         }
    2722             :         return false;
    2723             : }
    2724             : 
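                     : /*
                     :  * Allocate a brand new request for @bio: enter the queue, give merging
                     :  * and rq_qos throttling a chance first, and let a plug batch-allocate
                     :  * plug->nr_ios requests so later bios can reuse the cached ones.
                     :  */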
    2725           0 : static struct request *blk_mq_get_new_requests(struct request_queue *q,
    2726             :                                                struct blk_plug *plug,
    2727             :                                                struct bio *bio,
    2728             :                                                unsigned int nsegs)
    2729             : {
    2730           0 :         struct blk_mq_alloc_data data = {
    2731             :                 .q              = q,
    2732             :                 .nr_tags        = 1,
    2733           0 :                 .cmd_flags      = bio->bi_opf,
    2734             :         };
    2735             :         struct request *rq;
    2736             : 
    2737           0 :         if (unlikely(bio_queue_enter(bio)))
    2738             :                 return NULL;
    2739             : 
    2740           0 :         if (blk_mq_attempt_bio_merge(q, bio, nsegs))
    2741             :                 goto queue_exit;
    2742             : 
    2743           0 :         rq_qos_throttle(q, bio);
    2744             : 
    2745           0 :         if (plug) {
    2746           0 :                 data.nr_tags = plug->nr_ios;
    2747           0 :                 plug->nr_ios = 1;
    2748           0 :                 data.cached_rq = &plug->cached_rq;
    2749             :         }
    2750             : 
    2751           0 :         rq = __blk_mq_alloc_requests(&data);
    2752           0 :         if (rq)
    2753             :                 return rq;
    2754           0 :         rq_qos_cleanup(q, bio);
    2755           0 :         if (bio->bi_opf & REQ_NOWAIT)
    2756             :                 bio_wouldblock_error(bio);
    2757             : queue_exit:
    2758           0 :         blk_queue_exit(q);
    2759           0 :         return NULL;
    2760             : }
    2761             : 
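                     : /*
                     :  * Try to reuse a request cached in the plug by an earlier batch
                     :  * allocation. The cached request is only used if it belongs to the same
                     :  * queue, maps to the same hctx type and matches the bio's flush intent;
                     :  * otherwise the caller falls back to allocating a new request.
                     :  */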
    2762           0 : static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
    2763             :                 struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
    2764             : {
    2765             :         struct request *rq;
    2766             : 
    2767           0 :         if (!plug)
    2768             :                 return NULL;
    2769           0 :         rq = rq_list_peek(&plug->cached_rq);
    2770           0 :         if (!rq || rq->q != q)
    2771             :                 return NULL;
    2772             : 
    2773           0 :         if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
    2774           0 :                 *bio = NULL;
    2775           0 :                 return NULL;
    2776             :         }
    2777             : 
    2778           0 :         rq_qos_throttle(q, *bio);
    2779             : 
    2780           0 :         if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
    2781             :                 return NULL;
    2782           0 :         if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
    2783             :                 return NULL;
    2784             : 
    2785           0 :         rq->cmd_flags = (*bio)->bi_opf;
    2786           0 :         plug->cached_rq = rq_list_next(rq);
    2787           0 :         INIT_LIST_HEAD(&rq->queuelist);
    2788           0 :         return rq;
    2789             : }
    2790             : 
    2791             : /**
    2792             :  * blk_mq_submit_bio - Create and send a request to block device.
    2793             :  * @bio: Bio pointer.
    2794             :  *
    2795             :  * Builds up a request structure from @q and @bio and sends it to the device.
    2796             :  * The request may not be queued directly to hardware if:
    2797             :  * * This request can be merged with another one
    2798             :  * * We want to place the request in the plug list for possible future merging
    2799             :  * * There is an IO scheduler active on this queue
    2800             :  *
    2801             :  * It will not queue the request if there is an error with the bio or at
    2802             :  * request creation.
    2803             :  */
    2804           0 : void blk_mq_submit_bio(struct bio *bio)
    2805             : {
    2806           0 :         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
    2807           0 :         struct blk_plug *plug = blk_mq_plug(q, bio);
    2808           0 :         const int is_sync = op_is_sync(bio->bi_opf);
    2809             :         struct request *rq;
    2810           0 :         unsigned int nr_segs = 1;
    2811             :         blk_status_t ret;
    2812             : 
    2813           0 :         blk_queue_bounce(q, &bio);
    2814           0 :         if (blk_may_split(q, bio))
    2815           0 :                 __blk_queue_split(q, &bio, &nr_segs);
    2816             : 
    2817           0 :         if (!bio_integrity_prep(bio))
    2818           0 :                 return;
    2819             : 
    2820           0 :         rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
    2821           0 :         if (!rq) {
    2822           0 :                 if (!bio)
    2823             :                         return;
    2824           0 :                 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
    2825           0 :                 if (unlikely(!rq))
    2826             :                         return;
    2827             :         }
    2828             : 
    2829           0 :         trace_block_getrq(bio);
    2830             : 
    2831           0 :         rq_qos_track(q, rq, bio);
    2832             : 
    2833           0 :         blk_mq_bio_to_request(rq, bio, nr_segs);
    2834             : 
    2835           0 :         ret = blk_crypto_init_request(rq);
    2836             :         if (ret != BLK_STS_OK) {
    2837             :                 bio->bi_status = ret;
    2838             :                 bio_endio(bio);
    2839             :                 blk_mq_free_request(rq);
    2840             :                 return;
    2841             :         }
    2842             : 
    2843           0 :         if (op_is_flush(bio->bi_opf)) {
    2844           0 :                 blk_insert_flush(rq);
    2845           0 :                 return;
    2846             :         }
    2847             : 
    2848           0 :         if (plug)
    2849           0 :                 blk_add_rq_to_plug(plug, rq);
    2850           0 :         else if ((rq->rq_flags & RQF_ELV) ||
    2851           0 :                  (rq->mq_hctx->dispatch_busy &&
    2852           0 :                   (q->nr_hw_queues == 1 || !is_sync)))
    2853           0 :                 blk_mq_sched_insert_request(rq, false, true, true);
    2854             :         else
    2855           0 :                 blk_mq_run_dispatch_ops(rq->q,
    2856             :                                 blk_mq_try_issue_directly(rq->mq_hctx, rq));
    2857             : }
    2858             : 
    2859             : #ifdef CONFIG_BLK_MQ_STACKING
    2860             : /**
    2861             :  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
    2862             :  * @rq: the request being queued
    2863             :  */
    2864             : blk_status_t blk_insert_cloned_request(struct request *rq)
    2865             : {
    2866             :         struct request_queue *q = rq->q;
    2867             :         unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
    2868             :         blk_status_t ret;
    2869             : 
    2870             :         if (blk_rq_sectors(rq) > max_sectors) {
    2871             :                 /*
    2872             :                  * A SCSI device does not have a good way to report whether
    2873             :                  * Write Same/Zero is actually supported. If a device rejects
    2874             :                  * a non-read/write command (discard, write same, etc.) the
    2875             :                  * low-level device driver will set the relevant queue limit to
    2876             :                  * 0 to prevent blk-lib from issuing more of the offending
    2877             :                  * operations. Commands queued prior to the queue limit being
    2878             :                  * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
    2879             :                  * errors being propagated to upper layers.
    2880             :                  */
    2881             :                 if (max_sectors == 0)
    2882             :                         return BLK_STS_NOTSUPP;
    2883             : 
    2884             :                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
    2885             :                         __func__, blk_rq_sectors(rq), max_sectors);
    2886             :                 return BLK_STS_IOERR;
    2887             :         }
    2888             : 
    2889             :         /*
    2890             :          * The queue settings related to segment counting may differ from the
    2891             :          * original queue.
    2892             :          */
    2893             :         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
    2894             :         if (rq->nr_phys_segments > queue_max_segments(q)) {
    2895             :                 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
    2896             :                         __func__, rq->nr_phys_segments, queue_max_segments(q));
    2897             :                 return BLK_STS_IOERR;
    2898             :         }
    2899             : 
    2900             :         if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
    2901             :                 return BLK_STS_IOERR;
    2902             : 
    2903             :         if (blk_crypto_insert_cloned_request(rq))
    2904             :                 return BLK_STS_IOERR;
    2905             : 
    2906             :         blk_account_io_start(rq);
    2907             : 
    2908             :         /*
    2909             :          * Since we have a scheduler attached to the top device,
    2910             :          * bypass a potential scheduler on the bottom device for
    2911             :          * insert.
    2912             :          */
    2913             :         blk_mq_run_dispatch_ops(q,
    2914             :                         ret = blk_mq_request_issue_directly(rq, true));
    2915             :         if (ret)
    2916             :                 blk_account_io_done(rq, ktime_get_ns());
    2917             :         return ret;
    2918             : }
    2919             : EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
    2920             : 
    2921             : /**
    2922             :  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
    2923             :  * @rq: the clone request to be cleaned up
    2924             :  *
    2925             :  * Description:
    2926             :  *     Free all bios in @rq for a cloned request.
    2927             :  */
    2928             : void blk_rq_unprep_clone(struct request *rq)
    2929             : {
    2930             :         struct bio *bio;
    2931             : 
    2932             :         while ((bio = rq->bio) != NULL) {
    2933             :                 rq->bio = bio->bi_next;
    2934             : 
    2935             :                 bio_put(bio);
    2936             :         }
    2937             : }
    2938             : EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
    2939             : 
    2940             : /**
    2941             :  * blk_rq_prep_clone - Helper function to setup clone request
    2942             :  * @rq: the request to be setup
    2943             :  * @rq_src: original request to be cloned
    2944             :  * @bs: bio_set that bios for clone are allocated from
    2945             :  * @gfp_mask: memory allocation mask for bio
    2946             :  * @bio_ctr: setup function to be called for each clone bio.
    2947             :  *           Returns %0 for success, non %0 for failure.
    2948             :  * @data: private data to be passed to @bio_ctr
    2949             :  *
    2950             :  * Description:
    2951             :  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
    2952             :  *     Also, pages which the original bios are pointing to are not copied
    2953             :  *     Also, the pages which the original bios point to are not copied;
    2954             :  *     the cloned bios just point to the same pages.
    2955             :  *     the caller must complete @rq before @rq_src.
    2956             :  */
    2957             : int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
    2958             :                       struct bio_set *bs, gfp_t gfp_mask,
    2959             :                       int (*bio_ctr)(struct bio *, struct bio *, void *),
    2960             :                       void *data)
    2961             : {
    2962             :         struct bio *bio, *bio_src;
    2963             : 
    2964             :         if (!bs)
    2965             :                 bs = &fs_bio_set;
    2966             : 
    2967             :         __rq_for_each_bio(bio_src, rq_src) {
    2968             :                 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
    2969             :                                       bs);
    2970             :                 if (!bio)
    2971             :                         goto free_and_out;
    2972             : 
    2973             :                 if (bio_ctr && bio_ctr(bio, bio_src, data))
    2974             :                         goto free_and_out;
    2975             : 
    2976             :                 if (rq->bio) {
    2977             :                         rq->biotail->bi_next = bio;
    2978             :                         rq->biotail = bio;
    2979             :                 } else {
    2980             :                         rq->bio = rq->biotail = bio;
    2981             :                 }
    2982             :                 bio = NULL;
    2983             :         }
    2984             : 
    2985             :         /* Copy attributes of the original request to the clone request. */
    2986             :         rq->__sector = blk_rq_pos(rq_src);
    2987             :         rq->__data_len = blk_rq_bytes(rq_src);
    2988             :         if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
    2989             :                 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
    2990             :                 rq->special_vec = rq_src->special_vec;
    2991             :         }
    2992             :         rq->nr_phys_segments = rq_src->nr_phys_segments;
    2993             :         rq->ioprio = rq_src->ioprio;
    2994             : 
    2995             :         if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
    2996             :                 goto free_and_out;
    2997             : 
    2998             :         return 0;
    2999             : 
    3000             : free_and_out:
    3001             :         if (bio)
    3002             :                 bio_put(bio);
    3003             :         blk_rq_unprep_clone(rq);
    3004             : 
    3005             :         return -ENOMEM;
    3006             : }
    3007             : EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
    3008             : #endif /* CONFIG_BLK_MQ_STACKING */
    3009             : 
    3010             : /*
    3011             :  * Steal bios from a request and add them to a bio list.
    3012             :  * The request must not have been partially completed before.
    3013             :  */
    3014           0 : void blk_steal_bios(struct bio_list *list, struct request *rq)
    3015             : {
    3016           0 :         if (rq->bio) {
    3017           0 :                 if (list->tail)
    3018           0 :                         list->tail->bi_next = rq->bio;
    3019             :                 else
    3020           0 :                         list->head = rq->bio;
    3021           0 :                 list->tail = rq->biotail;
    3022             : 
    3023           0 :                 rq->bio = NULL;
    3024           0 :                 rq->biotail = NULL;
    3025             :         }
    3026             : 
    3027           0 :         rq->__data_len = 0;
    3028           0 : }
    3029             : EXPORT_SYMBOL_GPL(blk_steal_bios);
    3030             : 
    3031             : static size_t order_to_size(unsigned int order)
    3032             : {
    3033           0 :         return (size_t)PAGE_SIZE << order;
    3034             : }
    3035             : 
    3036             : /* Called before freeing the request pool in @tags */
    3037           0 : static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
    3038             :                                     struct blk_mq_tags *tags)
    3039             : {
    3040             :         struct page *page;
    3041             :         unsigned long flags;
    3042             : 
    3043             :         /* There is no need to clear the driver tags' own mapping */
    3044           0 :         if (drv_tags == tags)
    3045             :                 return;
    3046             : 
    3047           0 :         list_for_each_entry(page, &tags->page_list, lru) {
    3048           0 :                 unsigned long start = (unsigned long)page_address(page);
    3049           0 :                 unsigned long end = start + order_to_size(page->private);
    3050             :                 int i;
    3051             : 
    3052           0 :                 for (i = 0; i < drv_tags->nr_tags; i++) {
    3053           0 :                         struct request *rq = drv_tags->rqs[i];
    3054           0 :                         unsigned long rq_addr = (unsigned long)rq;
    3055             : 
    3056           0 :                         if (rq_addr >= start && rq_addr < end) {
    3057           0 :                                 WARN_ON_ONCE(req_ref_read(rq) != 0);
    3058           0 :                                 cmpxchg(&drv_tags->rqs[i], rq, NULL);
    3059             :                         }
    3060             :                 }
    3061             :         }
    3062             : 
    3063             :         /*
    3064             :          * Wait until all pending iterations are done.
    3065             :          *
    3066             :          * The request references have been cleared, and the clearing is
    3067             :          * guaranteed to be observed after the ->lock is released.
    3068             :          */
    3069           0 :         spin_lock_irqsave(&drv_tags->lock, flags);
    3070           0 :         spin_unlock_irqrestore(&drv_tags->lock, flags);
    3071             : }
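                     : 
                     : /*
                     :  * The empty lock/unlock pair above is deliberate: the tag iterators look
                     :  * up request pointers in ->rqs[] under ->lock, so taking and dropping the
                     :  * lock here guarantees that any iteration which could still observe the
                     :  * cleared entries has finished before the caller frees the request pages.
                     :  */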
    3072             : 
    3073           0 : void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
    3074             :                      unsigned int hctx_idx)
    3075             : {
    3076             :         struct blk_mq_tags *drv_tags;
    3077             :         struct page *page;
    3078             : 
    3079           0 :         if (list_empty(&tags->page_list))
    3080             :                 return;
    3081             : 
    3082           0 :         if (blk_mq_is_shared_tags(set->flags))
    3083           0 :                 drv_tags = set->shared_tags;
    3084             :         else
    3085           0 :                 drv_tags = set->tags[hctx_idx];
    3086             : 
    3087           0 :         if (tags->static_rqs && set->ops->exit_request) {
    3088             :                 int i;
    3089             : 
    3090           0 :                 for (i = 0; i < tags->nr_tags; i++) {
    3091           0 :                         struct request *rq = tags->static_rqs[i];
    3092             : 
    3093           0 :                         if (!rq)
    3094           0 :                                 continue;
    3095           0 :                         set->ops->exit_request(set, rq, hctx_idx);
    3096           0 :                         tags->static_rqs[i] = NULL;
    3097             :                 }
    3098             :         }
    3099             : 
    3100           0 :         blk_mq_clear_rq_mapping(drv_tags, tags);
    3101             : 
    3102           0 :         while (!list_empty(&tags->page_list)) {
    3103           0 :                 page = list_first_entry(&tags->page_list, struct page, lru);
    3104           0 :                 list_del_init(&page->lru);
    3105             :                 /*
    3106             :                  * Remove kmemleak object previously allocated in
    3107             :                  * blk_mq_alloc_rqs().
    3108             :                  */
    3109           0 :                 kmemleak_free(page_address(page));
    3110           0 :                 __free_pages(page, page->private);
    3111             :         }
    3112             : }
    3113             : 
    3114           0 : void blk_mq_free_rq_map(struct blk_mq_tags *tags)
    3115             : {
    3116           0 :         kfree(tags->rqs);
    3117           0 :         tags->rqs = NULL;
    3118           0 :         kfree(tags->static_rqs);
    3119           0 :         tags->static_rqs = NULL;
    3120             : 
    3121           0 :         blk_mq_free_tags(tags);
    3122           0 : }
    3123             : 
    3124             : static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
    3125             :                 unsigned int hctx_idx)
    3126             : {
    3127             :         int i;
    3128             : 
    3129           0 :         for (i = 0; i < set->nr_maps; i++) {
    3130           0 :                 unsigned int start = set->map[i].queue_offset;
    3131           0 :                 unsigned int end = start + set->map[i].nr_queues;
    3132             : 
    3133           0 :                 if (hctx_idx >= start && hctx_idx < end)
    3134             :                         break;
    3135             :         }
    3136             : 
    3137           0 :         if (i >= set->nr_maps)
    3138           0 :                 i = HCTX_TYPE_DEFAULT;
    3139             : 
    3140           0 :         return i;
    3141             : }
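                     : 
                     : /*
                     :  * hctx_idx_to_type() maps a hardware queue index to the queue map that
                     :  * covers it.  As a hypothetical example, with nr_maps == 2 and
                     :  *
                     :  *     map[0] = { .queue_offset = 0, .nr_queues = 8 }
                     :  *     map[1] = { .queue_offset = 8, .nr_queues = 2 }
                     :  *
                     :  * hctx_idx 3 resolves to type 0 (HCTX_TYPE_DEFAULT) and hctx_idx 9 to
                     :  * type 1; an index not covered by any map falls back to HCTX_TYPE_DEFAULT.
                     :  */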
    3142             : 
    3143           0 : static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
    3144             :                 unsigned int hctx_idx)
    3145             : {
    3146           0 :         enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
    3147             : 
    3148           0 :         return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
    3149             : }
    3150             : 
    3151           0 : static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
    3152             :                                                unsigned int hctx_idx,
    3153             :                                                unsigned int nr_tags,
    3154             :                                                unsigned int reserved_tags)
    3155             : {
    3156           0 :         int node = blk_mq_get_hctx_node(set, hctx_idx);
    3157             :         struct blk_mq_tags *tags;
    3158             : 
    3159           0 :         if (node == NUMA_NO_NODE)
    3160           0 :                 node = set->numa_node;
    3161             : 
    3162           0 :         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
    3163           0 :                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
    3164           0 :         if (!tags)
    3165             :                 return NULL;
    3166             : 
    3167           0 :         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
    3168             :                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
    3169             :                                  node);
    3170           0 :         if (!tags->rqs) {
    3171           0 :                 blk_mq_free_tags(tags);
    3172           0 :                 return NULL;
    3173             :         }
    3174             : 
    3175           0 :         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
    3176             :                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
    3177             :                                         node);
    3178           0 :         if (!tags->static_rqs) {
    3179           0 :                 kfree(tags->rqs);
    3180           0 :                 blk_mq_free_tags(tags);
    3181           0 :                 return NULL;
    3182             :         }
    3183             : 
    3184             :         return tags;
    3185             : }
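                     : 
                     : /*
                     :  * Note the two request arrays set up above: tags->static_rqs[] holds the
                     :  * preallocated requests that blk_mq_alloc_rqs() carves out of the page
                     :  * pool below, while tags->rqs[] serves as the tag -> in-flight request
                     :  * lookup table that is filled in as tags are assigned to requests.
                     :  */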
    3186             : 
    3187             : static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
    3188             :                                unsigned int hctx_idx, int node)
    3189             : {
    3190             :         int ret;
    3191             : 
    3192           0 :         if (set->ops->init_request) {
    3193           0 :                 ret = set->ops->init_request(set, rq, hctx_idx, node);
    3194           0 :                 if (ret)
    3195             :                         return ret;
    3196             :         }
    3197             : 
    3198           0 :         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
    3199             :         return 0;
    3200             : }
    3201             : 
    3202           0 : static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
    3203             :                             struct blk_mq_tags *tags,
    3204             :                             unsigned int hctx_idx, unsigned int depth)
    3205             : {
    3206           0 :         unsigned int i, j, entries_per_page, max_order = 4;
    3207           0 :         int node = blk_mq_get_hctx_node(set, hctx_idx);
    3208             :         size_t rq_size, left;
    3209             : 
    3210           0 :         if (node == NUMA_NO_NODE)
    3211           0 :                 node = set->numa_node;
    3212             : 
    3213           0 :         INIT_LIST_HEAD(&tags->page_list);
    3214             : 
    3215             :         /*
    3216             :          * rq_size is the size of the request plus driver payload, rounded
    3217             :          * to the cacheline size
    3218             :          */
    3219           0 :         rq_size = round_up(sizeof(struct request) + set->cmd_size,
    3220             :                                 cache_line_size());
    3221           0 :         left = rq_size * depth;
    3222             : 
    3223           0 :         for (i = 0; i < depth; ) {
    3224             :                 int this_order = max_order;
    3225             :                 struct page *page;
    3226             :                 int to_do;
    3227             :                 void *p;
    3228             : 
    3229           0 :                 while (this_order && left < order_to_size(this_order - 1))
    3230             :                         this_order--;
    3231             : 
    3232             :                 do {
    3233           0 :                         page = alloc_pages_node(node,
    3234             :                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
    3235             :                                 this_order);
    3236           0 :                         if (page)
    3237             :                                 break;
    3238           0 :                         if (!this_order--)
    3239             :                                 break;
    3240           0 :                         if (order_to_size(this_order) < rq_size)
    3241             :                                 break;
    3242             :                 } while (1);
    3243             : 
    3244           0 :                 if (!page)
    3245             :                         goto fail;
    3246             : 
    3247           0 :                 page->private = this_order;
    3248           0 :                 list_add_tail(&page->lru, &tags->page_list);
    3249             : 
    3250           0 :                 p = page_address(page);
    3251             :                 /*
    3252             :                  * Allow kmemleak to scan these pages as they contain pointers
    3253             :                  * to additional allocations made via ops->init_request().
    3254             :                  */
    3255           0 :                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
    3256           0 :                 entries_per_page = order_to_size(this_order) / rq_size;
    3257           0 :                 to_do = min(entries_per_page, depth - i);
    3258           0 :                 left -= to_do * rq_size;
    3259           0 :                 for (j = 0; j < to_do; j++) {
    3260           0 :                         struct request *rq = p;
    3261             : 
    3262           0 :                         tags->static_rqs[i] = rq;
    3263           0 :                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
    3264           0 :                                 tags->static_rqs[i] = NULL;
    3265           0 :                                 goto fail;
    3266             :                         }
    3267             : 
    3268           0 :                         p += rq_size;
    3269           0 :                         i++;
    3270             :                 }
    3271             :         }
    3272             :         return 0;
    3273             : 
    3274             : fail:
    3275           0 :         blk_mq_free_rqs(set, tags, hctx_idx);
    3276           0 :         return -ENOMEM;
    3277             : }
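                     : 
                     : /*
                     :  * A worked example of the sizing above, assuming (hypothetically) a
                     :  * 64-byte cache line, sizeof(struct request) + cmd_size of 448 bytes and
                     :  * depth == 256:
                     :  *
                     :  *     rq_size          = round_up(448, 64)   = 448
                     :  *     left             = 448 * 256           = 114688 bytes
                     :  *     order-4 chunk    = order_to_size(4)    = 65536 bytes (4 KiB pages)
                     :  *     entries_per_page = 65536 / 448         = 146 requests per chunk
                     :  *
                     :  * so two order-4 allocations (falling back to smaller orders if they
                     :  * fail) cover the whole depth.
                     :  */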
    3278             : 
    3279             : struct rq_iter_data {
    3280             :         struct blk_mq_hw_ctx *hctx;
    3281             :         bool has_rq;
    3282             : };
    3283             : 
    3284           0 : static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
    3285             : {
    3286           0 :         struct rq_iter_data *iter_data = data;
    3287             : 
    3288           0 :         if (rq->mq_hctx != iter_data->hctx)
    3289             :                 return true;
    3290           0 :         iter_data->has_rq = true;
    3291           0 :         return false;
    3292             : }
    3293             : 
    3294           0 : static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
    3295             : {
    3296           0 :         struct blk_mq_tags *tags = hctx->sched_tags ?
    3297           0 :                         hctx->sched_tags : hctx->tags;
    3298           0 :         struct rq_iter_data data = {
    3299             :                 .hctx   = hctx,
    3300             :         };
    3301             : 
    3302           0 :         blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
    3303           0 :         return data.has_rq;
    3304             : }
    3305             : 
    3306             : static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
    3307             :                 struct blk_mq_hw_ctx *hctx)
    3308             : {
    3309           0 :         if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
    3310             :                 return false;
    3311           0 :         if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
    3312             :                 return false;
    3313             :         return true;
    3314             : }
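                     : 
                     : /*
                     :  * blk_mq_last_cpu_in_hctx() returns true only when 'cpu' is the sole
                     :  * remaining online CPU in hctx->cpumask: the first check fails if an
                     :  * earlier online CPU exists in the mask, the second if any online CPU
                     :  * in the mask follows 'cpu'.
                     :  */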
    3315             : 
    3316           0 : static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
    3317             : {
    3318           0 :         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
    3319             :                         struct blk_mq_hw_ctx, cpuhp_online);
    3320             : 
    3321           0 :         if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
    3322           0 :             !blk_mq_last_cpu_in_hctx(cpu, hctx))
    3323             :                 return 0;
    3324             : 
    3325             :         /*
    3326             :          * Prevent new requests from being allocated on the current hctx.
    3327             :          *
    3328             :          * The smp_mb__after_atomic() pairs with the implied barrier in
    3329             :          * test_and_set_bit_lock() in sbitmap_get().  It ensures the inactive
    3330             :          * flag is seen once we return from the tag allocator.
    3331             :          */
    3332           0 :         set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
    3333           0 :         smp_mb__after_atomic();
    3334             : 
    3335             :         /*
    3336             :          * Try to grab a reference to the queue and wait for any outstanding
    3337             :          * requests.  If we could not grab a reference, the queue has been
    3338             :          * frozen and there are no requests.
    3339             :          */
    3340           0 :         if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
    3341           0 :                 while (blk_mq_hctx_has_requests(hctx))
    3342           0 :                         msleep(5);
    3343           0 :                 percpu_ref_put(&hctx->queue->q_usage_counter);
    3344             :         }
    3345             : 
    3346             :         return 0;
    3347             : }
    3348             : 
    3349           0 : static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
    3350             : {
    3351           0 :         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
    3352             :                         struct blk_mq_hw_ctx, cpuhp_online);
    3353             : 
    3354           0 :         if (cpumask_test_cpu(cpu, hctx->cpumask))
    3355           0 :                 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
    3356           0 :         return 0;
    3357             : }
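                     : 
                     : /*
                     :  * The two callbacks above implement the CPUHP_AP_BLK_MQ_ONLINE hotplug
                     :  * state: when the last online CPU of an hctx goes down, notify_offline
                     :  * marks the hctx BLK_MQ_S_INACTIVE and waits for its in-flight requests
                     :  * to drain; when one of its CPUs comes back online, notify_online clears
                     :  * the flag so the hctx can allocate tags again.
                     :  */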
    3358             : 
    3359             : /*
    3360             :  * 'cpu' is going away. Splice any existing rq_list entries from this
    3361             :  * software queue to the hw queue dispatch list, and ensure that it
    3362             :  * gets run.
    3363             :  */
    3364           0 : static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
    3365             : {
    3366             :         struct blk_mq_hw_ctx *hctx;
    3367             :         struct blk_mq_ctx *ctx;
    3368           0 :         LIST_HEAD(tmp);
    3369             :         enum hctx_type type;
    3370             : 
    3371           0 :         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
    3372           0 :         if (!cpumask_test_cpu(cpu, hctx->cpumask))
    3373             :                 return 0;
    3374             : 
    3375           0 :         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
    3376           0 :         type = hctx->type;
    3377             : 
    3378           0 :         spin_lock(&ctx->lock);
    3379           0 :         if (!list_empty(&ctx->rq_lists[type])) {
    3380           0 :                 list_splice_init(&ctx->rq_lists[type], &tmp);
    3381             :                 blk_mq_hctx_clear_pending(hctx, ctx);
    3382             :         }
    3383           0 :         spin_unlock(&ctx->lock);
    3384             : 
    3385           0 :         if (list_empty(&tmp))
    3386             :                 return 0;
    3387             : 
    3388           0 :         spin_lock(&hctx->lock);
    3389           0 :         list_splice_tail_init(&tmp, &hctx->dispatch);
    3390           0 :         spin_unlock(&hctx->lock);
    3391             : 
    3392           0 :         blk_mq_run_hw_queue(hctx, true);
    3393           0 :         return 0;
    3394             : }
    3395             : 
    3396           0 : static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
    3397             : {
    3398           0 :         if (!(hctx->flags & BLK_MQ_F_STACKING))
    3399           0 :                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
    3400             :                                                     &hctx->cpuhp_online);
    3401           0 :         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
    3402             :                                             &hctx->cpuhp_dead);
    3403           0 : }
    3404             : 
    3405             : /*
    3406             :  * Before freeing the hw queue, clear the flush request reference in
    3407             :  * tags->rqs[] to avoid a potential use-after-free.
    3408             :  */
    3409           0 : static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
    3410             :                 unsigned int queue_depth, struct request *flush_rq)
    3411             : {
    3412             :         int i;
    3413             :         unsigned long flags;
    3414             : 
    3415             :         /* The hw queue may not be mapped yet */
    3416           0 :         if (!tags)
    3417             :                 return;
    3418             : 
    3419           0 :         WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
    3420             : 
    3421           0 :         for (i = 0; i < queue_depth; i++)
    3422           0 :                 cmpxchg(&tags->rqs[i], flush_rq, NULL);
    3423             : 
    3424             :         /*
    3425             :          * Wait until all pending iterations are done.
    3426             :          *
    3427             :          * The request references have been cleared, and the clearing is
    3428             :          * guaranteed to be observed after the ->lock is released.
    3429             :          */
    3430           0 :         spin_lock_irqsave(&tags->lock, flags);
    3431           0 :         spin_unlock_irqrestore(&tags->lock, flags);
    3432             : }
    3433             : 
    3434             : /* hctx->ctxs will be freed in queue's release handler */
    3435           0 : static void blk_mq_exit_hctx(struct request_queue *q,
    3436             :                 struct blk_mq_tag_set *set,
    3437             :                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
    3438             : {
    3439           0 :         struct request *flush_rq = hctx->fq->flush_rq;
    3440             : 
    3441           0 :         if (blk_mq_hw_queue_mapped(hctx))
    3442             :                 blk_mq_tag_idle(hctx);
    3443             : 
    3444           0 :         blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
    3445             :                         set->queue_depth, flush_rq);
    3446           0 :         if (set->ops->exit_request)
    3447           0 :                 set->ops->exit_request(set, flush_rq, hctx_idx);
    3448             : 
    3449           0 :         if (set->ops->exit_hctx)
    3450           0 :                 set->ops->exit_hctx(hctx, hctx_idx);
    3451             : 
    3452           0 :         blk_mq_remove_cpuhp(hctx);
    3453             : 
    3454           0 :         xa_erase(&q->hctx_table, hctx_idx);
    3455             : 
    3456           0 :         spin_lock(&q->unused_hctx_lock);
    3457           0 :         list_add(&hctx->hctx_list, &q->unused_hctx_list);
    3458           0 :         spin_unlock(&q->unused_hctx_lock);
    3459           0 : }
    3460             : 
    3461           0 : static void blk_mq_exit_hw_queues(struct request_queue *q,
    3462             :                 struct blk_mq_tag_set *set, int nr_queue)
    3463             : {
    3464             :         struct blk_mq_hw_ctx *hctx;
    3465             :         unsigned long i;
    3466             : 
    3467           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3468           0 :                 if (i == nr_queue)
    3469             :                         break;
    3470           0 :                 blk_mq_exit_hctx(q, set, hctx, i);
    3471             :         }
    3472           0 : }
    3473             : 
    3474           0 : static int blk_mq_init_hctx(struct request_queue *q,
    3475             :                 struct blk_mq_tag_set *set,
    3476             :                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
    3477             : {
    3478           0 :         hctx->queue_num = hctx_idx;
    3479             : 
    3480           0 :         if (!(hctx->flags & BLK_MQ_F_STACKING))
    3481           0 :                 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
    3482             :                                 &hctx->cpuhp_online);
    3483           0 :         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
    3484             : 
    3485           0 :         hctx->tags = set->tags[hctx_idx];
    3486             : 
    3487           0 :         if (set->ops->init_hctx &&
    3488           0 :             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
    3489             :                 goto unregister_cpu_notifier;
    3490             : 
    3491           0 :         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
    3492           0 :                                 hctx->numa_node))
    3493             :                 goto exit_hctx;
    3494             : 
    3495           0 :         if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
    3496             :                 goto exit_flush_rq;
    3497             : 
    3498             :         return 0;
    3499             : 
    3500             :  exit_flush_rq:
    3501           0 :         if (set->ops->exit_request)
    3502           0 :                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
    3503             :  exit_hctx:
    3504           0 :         if (set->ops->exit_hctx)
    3505           0 :                 set->ops->exit_hctx(hctx, hctx_idx);
    3506             :  unregister_cpu_notifier:
    3507           0 :         blk_mq_remove_cpuhp(hctx);
    3508           0 :         return -1;
    3509             : }
    3510             : 
    3511             : static struct blk_mq_hw_ctx *
    3512           0 : blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
    3513             :                 int node)
    3514             : {
    3515             :         struct blk_mq_hw_ctx *hctx;
    3516           0 :         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
    3517             : 
    3518           0 :         hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
    3519           0 :         if (!hctx)
    3520             :                 goto fail_alloc_hctx;
    3521             : 
    3522           0 :         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
    3523             :                 goto free_hctx;
    3524             : 
    3525           0 :         atomic_set(&hctx->nr_active, 0);
    3526           0 :         if (node == NUMA_NO_NODE)
    3527           0 :                 node = set->numa_node;
    3528           0 :         hctx->numa_node = node;
    3529             : 
    3530           0 :         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
    3531           0 :         spin_lock_init(&hctx->lock);
    3532           0 :         INIT_LIST_HEAD(&hctx->dispatch);
    3533           0 :         hctx->queue = q;
    3534           0 :         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
    3535             : 
    3536           0 :         INIT_LIST_HEAD(&hctx->hctx_list);
    3537             : 
    3538             :         /*
    3539             :          * Allocate space for all possible cpus to avoid allocation at
    3540             :          * runtime
    3541             :          */
    3542           0 :         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
    3543             :                         gfp, node);
    3544           0 :         if (!hctx->ctxs)
    3545             :                 goto free_cpumask;
    3546             : 
    3547           0 :         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
    3548             :                                 gfp, node, false, false))
    3549             :                 goto free_ctxs;
    3550           0 :         hctx->nr_ctx = 0;
    3551             : 
    3552           0 :         spin_lock_init(&hctx->dispatch_wait_lock);
    3553           0 :         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
    3554           0 :         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
    3555             : 
    3556           0 :         hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
    3557           0 :         if (!hctx->fq)
    3558             :                 goto free_bitmap;
    3559             : 
    3560           0 :         blk_mq_hctx_kobj_init(hctx);
    3561             : 
    3562           0 :         return hctx;
    3563             : 
    3564             :  free_bitmap:
    3565           0 :         sbitmap_free(&hctx->ctx_map);
    3566             :  free_ctxs:
    3567           0 :         kfree(hctx->ctxs);
    3568             :  free_cpumask:
    3569           0 :         free_cpumask_var(hctx->cpumask);
    3570             :  free_hctx:
    3571           0 :         kfree(hctx);
    3572             :  fail_alloc_hctx:
    3573             :         return NULL;
    3574             : }
    3575             : 
    3576           0 : static void blk_mq_init_cpu_queues(struct request_queue *q,
    3577             :                                    unsigned int nr_hw_queues)
    3578             : {
    3579           0 :         struct blk_mq_tag_set *set = q->tag_set;
    3580             :         unsigned int i, j;
    3581             : 
    3582           0 :         for_each_possible_cpu(i) {
    3583           0 :                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
    3584             :                 struct blk_mq_hw_ctx *hctx;
    3585             :                 int k;
    3586             : 
    3587           0 :                 __ctx->cpu = i;
    3588           0 :                 spin_lock_init(&__ctx->lock);
    3589           0 :                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
    3590           0 :                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
    3591             : 
    3592           0 :                 __ctx->queue = q;
    3593             : 
    3594             :                 /*
    3595             :                  * Set the local node, IFF we have more than one hw queue. If
    3596             :                  * not, we remain on the home node of the device.
    3597             :                  */
    3598           0 :                 for (j = 0; j < set->nr_maps; j++) {
    3599           0 :                         hctx = blk_mq_map_queue_type(q, j, i);
    3600           0 :                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
    3601           0 :                                 hctx->numa_node = cpu_to_node(i);
    3602             :                 }
    3603             :         }
    3604           0 : }
    3605             : 
    3606           0 : struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
    3607             :                                              unsigned int hctx_idx,
    3608             :                                              unsigned int depth)
    3609             : {
    3610             :         struct blk_mq_tags *tags;
    3611             :         int ret;
    3612             : 
    3613           0 :         tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
    3614           0 :         if (!tags)
    3615             :                 return NULL;
    3616             : 
    3617           0 :         ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
    3618           0 :         if (ret) {
    3619           0 :                 blk_mq_free_rq_map(tags);
    3620           0 :                 return NULL;
    3621             :         }
    3622             : 
    3623             :         return tags;
    3624             : }
    3625             : 
    3626           0 : static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
    3627             :                                        int hctx_idx)
    3628             : {
    3629           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    3630           0 :                 set->tags[hctx_idx] = set->shared_tags;
    3631             : 
    3632           0 :                 return true;
    3633             :         }
    3634             : 
    3635           0 :         set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
    3636             :                                                        set->queue_depth);
    3637             : 
    3638           0 :         return set->tags[hctx_idx];
    3639             : }
    3640             : 
    3641           0 : void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
    3642             :                              struct blk_mq_tags *tags,
    3643             :                              unsigned int hctx_idx)
    3644             : {
    3645           0 :         if (tags) {
    3646           0 :                 blk_mq_free_rqs(set, tags, hctx_idx);
    3647           0 :                 blk_mq_free_rq_map(tags);
    3648             :         }
    3649           0 : }
    3650             : 
    3651           0 : static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
    3652             :                                       unsigned int hctx_idx)
    3653             : {
    3654           0 :         if (!blk_mq_is_shared_tags(set->flags))
    3655           0 :                 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
    3656             : 
    3657           0 :         set->tags[hctx_idx] = NULL;
    3658           0 : }
    3659             : 
    3660           0 : static void blk_mq_map_swqueue(struct request_queue *q)
    3661             : {
    3662             :         unsigned int j, hctx_idx;
    3663             :         unsigned long i;
    3664             :         struct blk_mq_hw_ctx *hctx;
    3665             :         struct blk_mq_ctx *ctx;
    3666           0 :         struct blk_mq_tag_set *set = q->tag_set;
    3667             : 
    3668           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3669           0 :                 cpumask_clear(hctx->cpumask);
    3670           0 :                 hctx->nr_ctx = 0;
    3671           0 :                 hctx->dispatch_from = NULL;
    3672             :         }
    3673             : 
    3674             :         /*
    3675             :          * Map software to hardware queues.
    3676             :          *
    3677             :          * If the cpu isn't present, the cpu is mapped to the first hctx.
    3678             :          */
    3679           0 :         for_each_possible_cpu(i) {
    3680             : 
    3681           0 :                 ctx = per_cpu_ptr(q->queue_ctx, i);
    3682           0 :                 for (j = 0; j < set->nr_maps; j++) {
    3683           0 :                         if (!set->map[j].nr_queues) {
    3684           0 :                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
    3685             :                                                 HCTX_TYPE_DEFAULT, i);
    3686           0 :                                 continue;
    3687             :                         }
    3688           0 :                         hctx_idx = set->map[j].mq_map[i];
    3689             :                         /* unmapped hw queue can be remapped after CPU topo changed */
    3690             :                         /* an unmapped hw queue can be remapped after the CPU topology changes */
    3691           0 :                             !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
    3692             :                                 /*
    3693             :                                  * If tags initialization fail for some hctx,
    3694             :                                  * If tag initialization fails for some hctx,
    3695             :                                  * that hctx won't be brought online.  In this
    3696             :                                  * case, remap the current ctx to hctx[0], which
    3697             :                                  * is guaranteed to always have tags allocated.
    3698           0 :                                 set->map[j].mq_map[i] = 0;
    3699             :                         }
    3700             : 
    3701           0 :                         hctx = blk_mq_map_queue_type(q, j, i);
    3702           0 :                         ctx->hctxs[j] = hctx;
    3703             :                         /*
    3704             :                          * If the CPU is already set in the mask, then we've
    3705             :                          * mapped this one already. This can happen if
    3706             :                          * devices share queues across queue maps.
    3707             :                          */
    3708           0 :                         if (cpumask_test_cpu(i, hctx->cpumask))
    3709           0 :                                 continue;
    3710             : 
    3711           0 :                         cpumask_set_cpu(i, hctx->cpumask);
    3712           0 :                         hctx->type = j;
    3713           0 :                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
    3714           0 :                         hctx->ctxs[hctx->nr_ctx++] = ctx;
    3715             : 
    3716             :                         /*
    3717             :                          * If the nr_ctx type overflows, we have exceeded the
    3718             :                          * number of sw queues we can support.
    3719             :                          */
    3720           0 :                         BUG_ON(!hctx->nr_ctx);
    3721             :                 }
    3722             : 
    3723           0 :                 for (; j < HCTX_MAX_TYPES; j++)
    3724           0 :                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
    3725             :                                         HCTX_TYPE_DEFAULT, i);
    3726             :         }
    3727             : 
    3728           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3729             :                 /*
    3730             :                  * If no software queues are mapped to this hardware queue,
    3731             :                  * disable it and free the request entries.
    3732             :                  */
    3733           0 :                 if (!hctx->nr_ctx) {
    3734             :                         /* Never unmap queue 0.  We need it as a
    3735             :                          * fallback in case a new remap fails
    3736             :                          * allocation.
    3737             :                          */
    3738           0 :                         if (i)
    3739           0 :                                 __blk_mq_free_map_and_rqs(set, i);
    3740             : 
    3741           0 :                         hctx->tags = NULL;
    3742           0 :                         continue;
    3743             :                 }
    3744             : 
    3745           0 :                 hctx->tags = set->tags[i];
    3746           0 :                 WARN_ON(!hctx->tags);
    3747             : 
    3748             :                 /*
    3749             :                  * Set the map size to the number of mapped software queues.
    3750             :                  * This is more accurate and more efficient than looping
    3751             :                  * over all possibly mapped software queues.
    3752             :                  */
    3753           0 :                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
    3754             : 
    3755             :                 /*
    3756             :                  * Initialize batch round-robin counts
    3757             :                  */
    3758           0 :                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
    3759           0 :                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
    3760             :         }
    3761           0 : }
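                     : 
                     : /*
                     :  * As a hypothetical illustration of the mapping built above: with 8
                     :  * possible CPUs, nr_maps == 1 and 4 hardware queues spread evenly by the
                     :  * driver's queue mapping, CPUs 0-1 become the software ctxs of hctx 0,
                     :  * CPUs 2-3 of hctx 1, and so on; each hctx then ends up with nr_ctx == 2
                     :  * and its ctx_map resized accordingly.
                     :  */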
    3762             : 
    3763             : /*
    3764             :  * Caller needs to ensure that we're either frozen/quiesced, or that
    3765             :  * the queue isn't live yet.
    3766             :  */
    3767           0 : static void queue_set_hctx_shared(struct request_queue *q, bool shared)
    3768             : {
    3769             :         struct blk_mq_hw_ctx *hctx;
    3770             :         unsigned long i;
    3771             : 
    3772           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    3773           0 :                 if (shared) {
    3774           0 :                         hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
    3775             :                 } else {
    3776           0 :                         blk_mq_tag_idle(hctx);
    3777           0 :                         hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
    3778             :                 }
    3779             :         }
    3780           0 : }
    3781             : 
    3782           0 : static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
    3783             :                                          bool shared)
    3784             : {
    3785             :         struct request_queue *q;
    3786             : 
    3787             :         lockdep_assert_held(&set->tag_list_lock);
    3788             : 
    3789           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    3790           0 :                 blk_mq_freeze_queue(q);
    3791           0 :                 queue_set_hctx_shared(q, shared);
    3792           0 :                 blk_mq_unfreeze_queue(q);
    3793             :         }
    3794           0 : }
    3795             : 
    3796           0 : static void blk_mq_del_queue_tag_set(struct request_queue *q)
    3797             : {
    3798           0 :         struct blk_mq_tag_set *set = q->tag_set;
    3799             : 
    3800           0 :         mutex_lock(&set->tag_list_lock);
    3801           0 :         list_del(&q->tag_set_list);
    3802           0 :         if (list_is_singular(&set->tag_list)) {
    3803             :                 /* just transitioned to unshared */
    3804           0 :                 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
    3805             :                 /* update existing queue */
    3806           0 :                 blk_mq_update_tag_set_shared(set, false);
    3807             :         }
    3808           0 :         mutex_unlock(&set->tag_list_lock);
    3809           0 :         INIT_LIST_HEAD(&q->tag_set_list);
    3810           0 : }
    3811             : 
    3812           0 : static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
    3813             :                                      struct request_queue *q)
    3814             : {
    3815           0 :         mutex_lock(&set->tag_list_lock);
    3816             : 
    3817             :         /*
    3818             :          * Check to see if we're transitioning to shared (from 1 to 2 queues).
    3819             :          */
    3820           0 :         if (!list_empty(&set->tag_list) &&
    3821           0 :             !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
    3822           0 :                 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
    3823             :                 /* update existing queue */
    3824           0 :                 blk_mq_update_tag_set_shared(set, true);
    3825             :         }
    3826           0 :         if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
    3827           0 :                 queue_set_hctx_shared(q, true);
    3828           0 :         list_add_tail(&q->tag_set_list, &set->tag_list);
    3829             : 
    3830           0 :         mutex_unlock(&set->tag_list_lock);
    3831           0 : }
    3832             : 
    3833             : /* All allocations will be freed in release handler of q->mq_kobj */
    3834           0 : static int blk_mq_alloc_ctxs(struct request_queue *q)
    3835             : {
    3836             :         struct blk_mq_ctxs *ctxs;
    3837             :         int cpu;
    3838             : 
    3839           0 :         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
    3840           0 :         if (!ctxs)
    3841             :                 return -ENOMEM;
    3842             : 
    3843           0 :         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
    3844           0 :         if (!ctxs->queue_ctx)
    3845             :                 goto fail;
    3846             : 
    3847           0 :         for_each_possible_cpu(cpu) {
    3848           0 :                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
    3849           0 :                 ctx->ctxs = ctxs;
    3850             :         }
    3851             : 
    3852           0 :         q->mq_kobj = &ctxs->kobj;
    3853           0 :         q->queue_ctx = ctxs->queue_ctx;
    3854             : 
    3855           0 :         return 0;
    3856             :  fail:
    3857           0 :         kfree(ctxs);
    3858           0 :         return -ENOMEM;
    3859             : }
    3860             : 
    3861             : /*
    3862             :  * This is the actual release handler for mq, but we do it from the
    3863             :  * request queue's release handler to avoid use-after-free and other
    3864             :  * headaches; q->mq_kobj shouldn't have been introduced, but we can't
    3865             :  * group the ctx/kctx kobjects without it.
    3866             :  */
    3867           0 : void blk_mq_release(struct request_queue *q)
    3868             : {
    3869             :         struct blk_mq_hw_ctx *hctx, *next;
    3870             :         unsigned long i;
    3871             : 
    3872           0 :         queue_for_each_hw_ctx(q, hctx, i)
    3873           0 :                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
    3874             : 
    3875             :         /* all hctx are in .unused_hctx_list now */
    3876           0 :         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
    3877           0 :                 list_del_init(&hctx->hctx_list);
    3878           0 :                 kobject_put(&hctx->kobj);
    3879             :         }
    3880             : 
    3881           0 :         xa_destroy(&q->hctx_table);
    3882             : 
    3883             :         /*
    3884             :          * Release .mq_kobj and the sw queues' kobjects now because
    3885             :          * both share their lifetime with the request queue.
    3886             :          */
    3887           0 :         blk_mq_sysfs_deinit(q);
    3888           0 : }
    3889             : 
    3890           0 : static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
    3891             :                 void *queuedata)
    3892             : {
    3893             :         struct request_queue *q;
    3894             :         int ret;
    3895             : 
    3896           0 :         q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
    3897           0 :         if (!q)
    3898             :                 return ERR_PTR(-ENOMEM);
    3899           0 :         q->queuedata = queuedata;
    3900           0 :         ret = blk_mq_init_allocated_queue(set, q);
    3901           0 :         if (ret) {
    3902           0 :                 blk_cleanup_queue(q);
    3903           0 :                 return ERR_PTR(ret);
    3904             :         }
    3905             :         return q;
    3906             : }
    3907             : 
    3908           0 : struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
    3909             : {
    3910           0 :         return blk_mq_init_queue_data(set, NULL);
    3911             : }
    3912             : EXPORT_SYMBOL(blk_mq_init_queue);
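                     : 
                     : /*
                     :  * A minimal usage sketch for the setup path exported above (illustrative
                     :  * only; names such as my_mq_ops and struct my_cmd are placeholders and
                     :  * error handling is trimmed):
                     :  *
                     :  *     static struct blk_mq_tag_set my_set = {
                     :  *             .ops            = &my_mq_ops,
                     :  *             .nr_hw_queues   = 1,
                     :  *             .queue_depth    = 128,
                     :  *             .numa_node      = NUMA_NO_NODE,
                     :  *             .cmd_size       = sizeof(struct my_cmd),
                     :  *             .flags          = BLK_MQ_F_SHOULD_MERGE,
                     :  *     };
                     :  *
                     :  *     if (blk_mq_alloc_tag_set(&my_set))
                     :  *             return -ENOMEM;
                     :  *     q = blk_mq_init_queue(&my_set);
                     :  *
                     :  * blk_mq_alloc_tag_set() allocates the per-hctx tag maps, and
                     :  * blk_mq_init_queue() builds a request_queue on top of them.
                     :  */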
    3913             : 
    3914           0 : struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
    3915             :                 struct lock_class_key *lkclass)
    3916             : {
    3917             :         struct request_queue *q;
    3918             :         struct gendisk *disk;
    3919             : 
    3920           0 :         q = blk_mq_init_queue_data(set, queuedata);
    3921           0 :         if (IS_ERR(q))
    3922             :                 return ERR_CAST(q);
    3923             : 
    3924           0 :         disk = __alloc_disk_node(q, set->numa_node, lkclass);
    3925           0 :         if (!disk) {
    3926           0 :                 blk_cleanup_queue(q);
    3927           0 :                 return ERR_PTR(-ENOMEM);
    3928             :         }
    3929             :         return disk;
    3930             : }
    3931             : EXPORT_SYMBOL(__blk_mq_alloc_disk);
    3932             : 
    3933           0 : static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
    3934             :                 struct blk_mq_tag_set *set, struct request_queue *q,
    3935             :                 int hctx_idx, int node)
    3936             : {
    3937           0 :         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
    3938             : 
    3939             :         /* reuse dead hctx first */
    3940           0 :         spin_lock(&q->unused_hctx_lock);
    3941           0 :         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
    3942           0 :                 if (tmp->numa_node == node) {
    3943             :                         hctx = tmp;
    3944             :                         break;
    3945             :                 }
    3946             :         }
    3947           0 :         if (hctx)
    3948           0 :                 list_del_init(&hctx->hctx_list);
    3949           0 :         spin_unlock(&q->unused_hctx_lock);
    3950             : 
    3951           0 :         if (!hctx)
    3952           0 :                 hctx = blk_mq_alloc_hctx(q, set, node);
    3953           0 :         if (!hctx)
    3954             :                 goto fail;
    3955             : 
    3956           0 :         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
    3957             :                 goto free_hctx;
    3958             : 
    3959             :         return hctx;
    3960             : 
    3961             :  free_hctx:
    3962           0 :         kobject_put(&hctx->kobj);
    3963             :  fail:
    3964             :         return NULL;
    3965             : }
    3966             : 
    3967           0 : static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
    3968             :                                                 struct request_queue *q)
    3969             : {
    3970             :         struct blk_mq_hw_ctx *hctx;
    3971             :         unsigned long i, j;
    3972             : 
    3973             :         /* protect against switching io scheduler  */
    3974           0 :         mutex_lock(&q->sysfs_lock);
    3975           0 :         for (i = 0; i < set->nr_hw_queues; i++) {
    3976             :                 int old_node;
    3977           0 :                 int node = blk_mq_get_hctx_node(set, i);
    3978           0 :                 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
    3979             : 
    3980           0 :                 if (old_hctx) {
    3981           0 :                         old_node = old_hctx->numa_node;
    3982           0 :                         blk_mq_exit_hctx(q, set, old_hctx, i);
    3983             :                 }
    3984             : 
    3985           0 :                 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
    3986           0 :                         if (!old_hctx)
    3987             :                                 break;
    3988           0 :                         pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
    3989             :                                         node, old_node);
    3990           0 :                         hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
    3991           0 :                         WARN_ON_ONCE(!hctx);
    3992             :                 }
    3993             :         }
    3994             :         /*
    3995             :          * Increasing nr_hw_queues failed. Free the newly allocated
    3996             :          * hctxs and keep the previous q->nr_hw_queues.
    3997             :          */
    3998           0 :         if (i != set->nr_hw_queues) {
    3999           0 :                 j = q->nr_hw_queues;
    4000             :         } else {
    4001           0 :                 j = i;
    4002           0 :                 q->nr_hw_queues = set->nr_hw_queues;
    4003             :         }
    4004             : 
    4005           0 :         xa_for_each_start(&q->hctx_table, j, hctx, j)
    4006           0 :                 blk_mq_exit_hctx(q, set, hctx, j);
    4007           0 :         mutex_unlock(&q->sysfs_lock);
    4008           0 : }
    4009             : 
    4010           0 : static void blk_mq_update_poll_flag(struct request_queue *q)
    4011             : {
    4012           0 :         struct blk_mq_tag_set *set = q->tag_set;
    4013             : 
    4014           0 :         if (set->nr_maps > HCTX_TYPE_POLL &&
    4015           0 :             set->map[HCTX_TYPE_POLL].nr_queues)
    4016           0 :                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
    4017             :         else
    4018           0 :                 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
    4019           0 : }
    4020             : 
    4021           0 : int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
    4022             :                 struct request_queue *q)
    4023             : {
    4024           0 :         WARN_ON_ONCE(blk_queue_has_srcu(q) !=
    4025             :                         !!(set->flags & BLK_MQ_F_BLOCKING));
    4026             : 
    4027             :         /* mark the queue as mq asap */
    4028           0 :         q->mq_ops = set->ops;
    4029             : 
    4030           0 :         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
    4031             :                                              blk_mq_poll_stats_bkt,
    4032             :                                              BLK_MQ_POLL_STATS_BKTS, q);
    4033           0 :         if (!q->poll_cb)
    4034             :                 goto err_exit;
    4035             : 
    4036           0 :         if (blk_mq_alloc_ctxs(q))
    4037             :                 goto err_poll;
    4038             : 
    4039             :         /* init q->mq_kobj and sw queues' kobjects */
    4040           0 :         blk_mq_sysfs_init(q);
    4041             : 
    4042           0 :         INIT_LIST_HEAD(&q->unused_hctx_list);
    4043           0 :         spin_lock_init(&q->unused_hctx_lock);
    4044             : 
    4045           0 :         xa_init(&q->hctx_table);
    4046             : 
    4047           0 :         blk_mq_realloc_hw_ctxs(set, q);
    4048           0 :         if (!q->nr_hw_queues)
    4049             :                 goto err_hctxs;
    4050             : 
    4051           0 :         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
    4052           0 :         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
    4053             : 
    4054           0 :         q->tag_set = set;
    4055             : 
    4056           0 :         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
    4057           0 :         blk_mq_update_poll_flag(q);
    4058             : 
    4059           0 :         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
    4060           0 :         INIT_LIST_HEAD(&q->requeue_list);
    4061           0 :         spin_lock_init(&q->requeue_lock);
    4062             : 
    4063           0 :         q->nr_requests = set->queue_depth;
    4064             : 
    4065             :         /*
    4066             :          * Default to classic polling
    4067             :          */
    4068           0 :         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
    4069             : 
    4070           0 :         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
    4071           0 :         blk_mq_add_queue_tag_set(set, q);
    4072           0 :         blk_mq_map_swqueue(q);
    4073           0 :         return 0;
    4074             : 
    4075             : err_hctxs:
    4076           0 :         xa_destroy(&q->hctx_table);
    4077           0 :         q->nr_hw_queues = 0;
    4078           0 :         blk_mq_sysfs_deinit(q);
    4079             : err_poll:
    4080           0 :         blk_stat_free_callback(q->poll_cb);
    4081           0 :         q->poll_cb = NULL;
    4082             : err_exit:
    4083           0 :         q->mq_ops = NULL;
    4084           0 :         return -ENOMEM;
    4085             : }
    4086             : EXPORT_SYMBOL(blk_mq_init_allocated_queue);
    4087             : 
    4088             : /* tags can _not_ be used after returning from blk_mq_exit_queue */
    4089           0 : void blk_mq_exit_queue(struct request_queue *q)
    4090             : {
    4091           0 :         struct blk_mq_tag_set *set = q->tag_set;
    4092             : 
    4093             :         /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
    4094           0 :         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
    4095             :         /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
    4096           0 :         blk_mq_del_queue_tag_set(q);
    4097           0 : }
    4098             : 
    4099           0 : static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
    4100             : {
    4101             :         int i;
    4102             : 
    4103           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    4104           0 :                 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
    4105             :                                                 BLK_MQ_NO_HCTX_IDX,
    4106             :                                                 set->queue_depth);
    4107           0 :                 if (!set->shared_tags)
    4108             :                         return -ENOMEM;
    4109             :         }
    4110             : 
    4111           0 :         for (i = 0; i < set->nr_hw_queues; i++) {
    4112           0 :                 if (!__blk_mq_alloc_map_and_rqs(set, i))
    4113             :                         goto out_unwind;
    4114           0 :                 cond_resched();
    4115             :         }
    4116             : 
    4117             :         return 0;
    4118             : 
    4119             : out_unwind:
    4120           0 :         while (--i >= 0)
    4121           0 :                 __blk_mq_free_map_and_rqs(set, i);
    4122             : 
    4123           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    4124           0 :                 blk_mq_free_map_and_rqs(set, set->shared_tags,
    4125             :                                         BLK_MQ_NO_HCTX_IDX);
    4126             :         }
    4127             : 
    4128             :         return -ENOMEM;
    4129             : }
    4130             : 
    4131             : /*
    4132             :  * Allocate the request maps associated with this tag_set. Note that this
    4133             :  * may reduce the depth asked for, if memory is tight. set->queue_depth
    4134             :  * will be updated to reflect the allocated depth.
    4135             :  */
    4136           0 : static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
    4137             : {
    4138             :         unsigned int depth;
    4139             :         int err;
    4140             : 
    4141           0 :         depth = set->queue_depth;
    4142             :         do {
    4143           0 :                 err = __blk_mq_alloc_rq_maps(set);
    4144           0 :                 if (!err)
    4145             :                         break;
    4146             : 
    4147           0 :                 set->queue_depth >>= 1;
    4148           0 :                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
    4149             :                         err = -ENOMEM;
    4150             :                         break;
    4151             :                 }
    4152           0 :         } while (set->queue_depth);
    4153             : 
    4154           0 :         if (!set->queue_depth || err) {
    4155           0 :                 pr_err("blk-mq: failed to allocate request map\n");
    4156           0 :                 return -ENOMEM;
    4157             :         }
    4158             : 
    4159           0 :         if (depth != set->queue_depth)
    4160           0 :                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
    4161             :                                                 depth, set->queue_depth);
    4162             : 
    4163             :         return 0;
    4164             : }
    4165             : 
    4166           0 : static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
    4167             : {
    4168             :         /*
    4169             :          * blk_mq_map_queues() and multiple .map_queues() implementations
    4170             :          * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
    4171             :          * number of hardware queues.
    4172             :          */
    4173           0 :         if (set->nr_maps == 1)
    4174           0 :                 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
    4175             : 
    4176           0 :         if (set->ops->map_queues && !is_kdump_kernel()) {
    4177             :                 int i;
    4178             : 
    4179             :                 /*
    4180             :                  * transport .map_queues is usually done in the following
    4181             :                  * way:
    4182             :                  *
    4183             :                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
    4184             :                  *      mask = get_cpu_mask(queue)
    4185             :                  *      for_each_cpu(cpu, mask)
    4186             :                  *              set->map[x].mq_map[cpu] = queue;
    4187             :                  * }
    4188             :                  *
     4189             :                  * When we need to remap, the table has to be cleared
     4190             :                  * first to kill stale mappings, since a CPU may not be
     4191             :                  * mapped to any hw queue after the remap.
    4192             :                  */
    4193           0 :                 for (i = 0; i < set->nr_maps; i++)
    4194           0 :                         blk_mq_clear_mq_map(&set->map[i]);
    4195             : 
    4196           0 :                 return set->ops->map_queues(set);
    4197             :         } else {
    4198           0 :                 BUG_ON(set->nr_maps > 1);
    4199           0 :                 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
    4200             :         }
    4201             : }
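
For illustration, a minimal sketch of a transport-side .map_queues callback following the convention described in the comment inside blk_mq_update_queue_map() above; my_get_cpu_mask() is a hypothetical per-queue affinity helper and the single-map (HCTX_TYPE_DEFAULT only) layout is an assumption:

    static int my_map_queues(struct blk_mq_tag_set *set)
    {
            struct blk_mq_queue_map *map = &set->map[HCTX_TYPE_DEFAULT];
            unsigned int queue, cpu;

            for (queue = 0; queue < set->nr_hw_queues; queue++) {
                    /* hypothetical helper: the CPUs served by this hw queue */
                    const struct cpumask *mask = my_get_cpu_mask(queue);

                    /* the core has already cleared the map before calling us */
                    for_each_cpu(cpu, mask)
                            map->mq_map[cpu] = queue;
            }
            return 0;
    }
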
    4202             : 
    4203           0 : static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
    4204             :                                   int cur_nr_hw_queues, int new_nr_hw_queues)
    4205             : {
    4206             :         struct blk_mq_tags **new_tags;
    4207             : 
    4208           0 :         if (cur_nr_hw_queues >= new_nr_hw_queues)
    4209             :                 return 0;
    4210             : 
    4211           0 :         new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
    4212             :                                 GFP_KERNEL, set->numa_node);
    4213           0 :         if (!new_tags)
    4214             :                 return -ENOMEM;
    4215             : 
    4216           0 :         if (set->tags)
    4217           0 :                 memcpy(new_tags, set->tags, cur_nr_hw_queues *
    4218             :                        sizeof(*set->tags));
    4219           0 :         kfree(set->tags);
    4220           0 :         set->tags = new_tags;
    4221           0 :         set->nr_hw_queues = new_nr_hw_queues;
    4222             : 
    4223             :         return 0;
    4224             : }
    4225             : 
    4226             : static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
    4227             :                                 int new_nr_hw_queues)
    4228             : {
    4229           0 :         return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
    4230             : }
    4231             : 
    4232             : /*
    4233             :  * Alloc a tag set to be associated with one or more request queues.
    4234             :  * May fail with EINVAL for various error conditions. May adjust the
    4235             :  * requested depth down, if it's too large. In that case, the set
    4236             :  * value will be stored in set->queue_depth.
    4237             :  */
    4238           0 : int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
    4239             : {
    4240             :         int i, ret;
    4241             : 
    4242             :         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
    4243             : 
    4244           0 :         if (!set->nr_hw_queues)
    4245             :                 return -EINVAL;
    4246           0 :         if (!set->queue_depth)
    4247             :                 return -EINVAL;
    4248           0 :         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
    4249             :                 return -EINVAL;
    4250             : 
    4251           0 :         if (!set->ops->queue_rq)
    4252             :                 return -EINVAL;
    4253             : 
    4254           0 :         if (!set->ops->get_budget ^ !set->ops->put_budget)
    4255             :                 return -EINVAL;
    4256             : 
    4257           0 :         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
    4258           0 :                 pr_info("blk-mq: reduced tag depth to %u\n",
    4259             :                         BLK_MQ_MAX_DEPTH);
    4260           0 :                 set->queue_depth = BLK_MQ_MAX_DEPTH;
    4261             :         }
    4262             : 
    4263           0 :         if (!set->nr_maps)
    4264           0 :                 set->nr_maps = 1;
    4265           0 :         else if (set->nr_maps > HCTX_MAX_TYPES)
    4266             :                 return -EINVAL;
    4267             : 
    4268             :         /*
    4269             :          * If a crashdump is active, then we are potentially in a very
    4270             :          * memory constrained environment. Limit us to 1 queue and
    4271             :          * 64 tags to prevent using too much memory.
    4272             :          */
    4273             :         if (is_kdump_kernel()) {
    4274             :                 set->nr_hw_queues = 1;
    4275             :                 set->nr_maps = 1;
    4276             :                 set->queue_depth = min(64U, set->queue_depth);
    4277             :         }
    4278             :         /*
    4279             :          * There is no use for more h/w queues than cpus if we just have
    4280             :          * a single map
    4281             :          */
    4282           0 :         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
    4283           0 :                 set->nr_hw_queues = nr_cpu_ids;
    4284             : 
    4285           0 :         if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
    4286             :                 return -ENOMEM;
    4287             : 
    4288             :         ret = -ENOMEM;
    4289           0 :         for (i = 0; i < set->nr_maps; i++) {
    4290           0 :                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
    4291             :                                                   sizeof(set->map[i].mq_map[0]),
    4292             :                                                   GFP_KERNEL, set->numa_node);
    4293           0 :                 if (!set->map[i].mq_map)
    4294             :                         goto out_free_mq_map;
    4295           0 :                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
    4296             :         }
    4297             : 
    4298           0 :         ret = blk_mq_update_queue_map(set);
    4299           0 :         if (ret)
    4300             :                 goto out_free_mq_map;
    4301             : 
    4302           0 :         ret = blk_mq_alloc_set_map_and_rqs(set);
    4303           0 :         if (ret)
    4304             :                 goto out_free_mq_map;
    4305             : 
    4306           0 :         mutex_init(&set->tag_list_lock);
    4307           0 :         INIT_LIST_HEAD(&set->tag_list);
    4308             : 
    4309           0 :         return 0;
    4310             : 
    4311             : out_free_mq_map:
    4312           0 :         for (i = 0; i < set->nr_maps; i++) {
    4313           0 :                 kfree(set->map[i].mq_map);
    4314           0 :                 set->map[i].mq_map = NULL;
    4315             :         }
    4316           0 :         kfree(set->tags);
    4317           0 :         set->tags = NULL;
    4318           0 :         return ret;
    4319             : }
    4320             : EXPORT_SYMBOL(blk_mq_alloc_tag_set);
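
For illustration, a minimal driver-side sketch of populating a tag set and calling blk_mq_alloc_tag_set(); struct my_dev, my_mq_ops and the chosen depth and queue count are hypothetical, and only .queue_rq is strictly required by the checks above:

    static int my_driver_setup_tags(struct my_dev *dev)
    {
            struct blk_mq_tag_set *set = &dev->tag_set;

            memset(set, 0, sizeof(*set));
            set->ops = &my_mq_ops;            /* must provide at least .queue_rq */
            set->nr_hw_queues = 4;            /* clamped to nr_cpu_ids for a single map */
            set->nr_maps = 1;                 /* only HCTX_TYPE_DEFAULT is used */
            set->queue_depth = 128;           /* may be reduced if memory is tight */
            set->numa_node = NUMA_NO_NODE;

            return blk_mq_alloc_tag_set(set); /* pair with blk_mq_free_tag_set() on teardown */
    }
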
    4321             : 
    4322             : /* allocate and initialize a tagset for a simple single-queue device */
    4323           0 : int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
    4324             :                 const struct blk_mq_ops *ops, unsigned int queue_depth,
    4325             :                 unsigned int set_flags)
    4326             : {
    4327           0 :         memset(set, 0, sizeof(*set));
    4328           0 :         set->ops = ops;
    4329           0 :         set->nr_hw_queues = 1;
    4330           0 :         set->nr_maps = 1;
    4331           0 :         set->queue_depth = queue_depth;
    4332           0 :         set->numa_node = NUMA_NO_NODE;
    4333           0 :         set->flags = set_flags;
    4334           0 :         return blk_mq_alloc_tag_set(set);
    4335             : }
    4336             : EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
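
Similarly, a sketch of the single-queue convenience helper; the depth of 64 and the zero flags are arbitrary illustrative choices:

    static int my_sq_setup(struct my_dev *dev)
    {
            /* one hw queue, one map, NUMA_NO_NODE, depth 64, no extra flags */
            return blk_mq_alloc_sq_tag_set(&dev->tag_set, &my_mq_ops, 64, 0);
    }
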
    4337             : 
    4338           0 : void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
    4339             : {
    4340             :         int i, j;
    4341             : 
    4342           0 :         for (i = 0; i < set->nr_hw_queues; i++)
    4343           0 :                 __blk_mq_free_map_and_rqs(set, i);
    4344             : 
    4345           0 :         if (blk_mq_is_shared_tags(set->flags)) {
    4346           0 :                 blk_mq_free_map_and_rqs(set, set->shared_tags,
    4347             :                                         BLK_MQ_NO_HCTX_IDX);
    4348             :         }
    4349             : 
    4350           0 :         for (j = 0; j < set->nr_maps; j++) {
    4351           0 :                 kfree(set->map[j].mq_map);
    4352           0 :                 set->map[j].mq_map = NULL;
    4353             :         }
    4354             : 
    4355           0 :         kfree(set->tags);
    4356           0 :         set->tags = NULL;
    4357           0 : }
    4358             : EXPORT_SYMBOL(blk_mq_free_tag_set);
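
On the teardown side, a sketch of the matching cleanup; my_dev is the same hypothetical driver state, and the request queues created from the set are assumed to have been torn down already (see the note above blk_mq_exit_queue()):

    static void my_driver_remove_tags(struct my_dev *dev)
    {
            /* no request_queue may still be using these tags at this point */
            blk_mq_free_tag_set(&dev->tag_set);
    }
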
    4359             : 
    4360           0 : int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
    4361             : {
    4362           0 :         struct blk_mq_tag_set *set = q->tag_set;
    4363             :         struct blk_mq_hw_ctx *hctx;
    4364             :         int ret;
    4365             :         unsigned long i;
    4366             : 
    4367           0 :         if (!set)
    4368             :                 return -EINVAL;
    4369             : 
    4370           0 :         if (q->nr_requests == nr)
    4371             :                 return 0;
    4372             : 
    4373           0 :         blk_mq_freeze_queue(q);
    4374           0 :         blk_mq_quiesce_queue(q);
    4375             : 
    4376           0 :         ret = 0;
    4377           0 :         queue_for_each_hw_ctx(q, hctx, i) {
    4378           0 :                 if (!hctx->tags)
    4379           0 :                         continue;
    4380             :                 /*
    4381             :                  * If we're using an MQ scheduler, just update the scheduler
    4382             :                  * queue depth. This is similar to what the old code would do.
    4383             :                  */
    4384           0 :                 if (hctx->sched_tags) {
    4385           0 :                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
    4386             :                                                       nr, true);
    4387             :                 } else {
    4388           0 :                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
    4389             :                                                       false);
    4390             :                 }
    4391           0 :                 if (ret)
    4392             :                         break;
    4393           0 :                 if (q->elevator && q->elevator->type->ops.depth_updated)
    4394           0 :                         q->elevator->type->ops.depth_updated(hctx);
    4395             :         }
    4396           0 :         if (!ret) {
    4397           0 :                 q->nr_requests = nr;
    4398           0 :                 if (blk_mq_is_shared_tags(set->flags)) {
    4399           0 :                         if (q->elevator)
    4400           0 :                                 blk_mq_tag_update_sched_shared_tags(q);
    4401             :                         else
    4402           0 :                                 blk_mq_tag_resize_shared_tags(set, nr);
    4403             :                 }
    4404             :         }
    4405             : 
    4406           0 :         blk_mq_unquiesce_queue(q);
    4407           0 :         blk_mq_unfreeze_queue(q);
    4408             : 
    4409           0 :         return ret;
    4410             : }
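
For context, a minimal sketch of retuning the per-queue request depth from a caller; q and new_nr are illustrative:

    static int my_retune_queue_depth(struct request_queue *q, unsigned int new_nr)
    {
            /*
             * Freezes and quiesces the queue internally; the new depth may be
             * rejected, for example when growing past the tag set's depth
             * without an I/O scheduler attached.
             */
            return blk_mq_update_nr_requests(q, new_nr);
    }
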
    4411             : 
    4412             : /*
    4413             :  * request_queue and elevator_type pair.
    4414             :  * It is just used by __blk_mq_update_nr_hw_queues to cache
    4415             :  * the elevator_type associated with a request_queue.
    4416             :  */
    4417             : struct blk_mq_qe_pair {
    4418             :         struct list_head node;
    4419             :         struct request_queue *q;
    4420             :         struct elevator_type *type;
    4421             : };
    4422             : 
    4423             : /*
     4424             :  * Cache the elevator_type in the qe pair list and switch the
     4425             :  * io scheduler to 'none'.
    4426             :  */
    4427           0 : static bool blk_mq_elv_switch_none(struct list_head *head,
    4428             :                 struct request_queue *q)
    4429             : {
    4430             :         struct blk_mq_qe_pair *qe;
    4431             : 
    4432           0 :         if (!q->elevator)
    4433             :                 return true;
    4434             : 
    4435           0 :         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
    4436           0 :         if (!qe)
    4437             :                 return false;
    4438             : 
    4439           0 :         INIT_LIST_HEAD(&qe->node);
    4440           0 :         qe->q = q;
    4441           0 :         qe->type = q->elevator->type;
    4442           0 :         list_add(&qe->node, head);
    4443             : 
    4444           0 :         mutex_lock(&q->sysfs_lock);
    4445             :         /*
    4446             :          * After elevator_switch_mq, the previous elevator_queue will be
     4447             :          * released by elevator_release. The reference to the io scheduler
     4448             :          * module taken by elevator_get will also be put. So take an extra
     4449             :          * reference to the io scheduler module here to prevent it from
     4450             :          * being removed while it is still in use.
    4451             :          */
    4452           0 :         __module_get(qe->type->elevator_owner);
    4453           0 :         elevator_switch_mq(q, NULL);
    4454           0 :         mutex_unlock(&q->sysfs_lock);
    4455             : 
    4456           0 :         return true;
    4457             : }
    4458             : 
    4459             : static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
    4460             :                                                 struct request_queue *q)
    4461             : {
    4462             :         struct blk_mq_qe_pair *qe;
    4463             : 
    4464           0 :         list_for_each_entry(qe, head, node)
    4465           0 :                 if (qe->q == q)
    4466             :                         return qe;
    4467             : 
    4468             :         return NULL;
    4469             : }
    4470             : 
    4471           0 : static void blk_mq_elv_switch_back(struct list_head *head,
    4472             :                                   struct request_queue *q)
    4473             : {
    4474             :         struct blk_mq_qe_pair *qe;
    4475             :         struct elevator_type *t;
    4476             : 
    4477           0 :         qe = blk_lookup_qe_pair(head, q);
    4478           0 :         if (!qe)
    4479             :                 return;
    4480           0 :         t = qe->type;
    4481           0 :         list_del(&qe->node);
    4482           0 :         kfree(qe);
    4483             : 
    4484           0 :         mutex_lock(&q->sysfs_lock);
    4485           0 :         elevator_switch_mq(q, t);
    4486           0 :         mutex_unlock(&q->sysfs_lock);
    4487             : }
    4488             : 
    4489           0 : static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
    4490             :                                                         int nr_hw_queues)
    4491             : {
    4492             :         struct request_queue *q;
    4493           0 :         LIST_HEAD(head);
    4494             :         int prev_nr_hw_queues;
    4495             : 
    4496             :         lockdep_assert_held(&set->tag_list_lock);
    4497             : 
    4498           0 :         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
    4499           0 :                 nr_hw_queues = nr_cpu_ids;
    4500           0 :         if (nr_hw_queues < 1)
    4501           0 :                 return;
    4502           0 :         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
    4503             :                 return;
    4504             : 
    4505           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4506           0 :                 blk_mq_freeze_queue(q);
    4507             :         /*
    4508             :          * Switch IO scheduler to 'none', cleaning up the data associated
    4509             :          * with the previous scheduler. We will switch back once we are done
    4510             :          * updating the new sw to hw queue mappings.
    4511             :          */
    4512           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4513           0 :                 if (!blk_mq_elv_switch_none(&head, q))
    4514             :                         goto switch_back;
    4515             : 
    4516           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    4517           0 :                 blk_mq_debugfs_unregister_hctxs(q);
    4518           0 :                 blk_mq_sysfs_unregister(q);
    4519             :         }
    4520             : 
    4521           0 :         prev_nr_hw_queues = set->nr_hw_queues;
    4522           0 :         if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
    4523             :             0)
    4524             :                 goto reregister;
    4525             : 
    4526           0 :         set->nr_hw_queues = nr_hw_queues;
    4527             : fallback:
    4528           0 :         blk_mq_update_queue_map(set);
    4529           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    4530           0 :                 blk_mq_realloc_hw_ctxs(set, q);
    4531           0 :                 blk_mq_update_poll_flag(q);
    4532           0 :                 if (q->nr_hw_queues != set->nr_hw_queues) {
    4533           0 :                         int i = prev_nr_hw_queues;
    4534             : 
    4535           0 :                         pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
    4536             :                                         nr_hw_queues, prev_nr_hw_queues);
    4537           0 :                         for (; i < set->nr_hw_queues; i++)
    4538           0 :                                 __blk_mq_free_map_and_rqs(set, i);
    4539             : 
    4540           0 :                         set->nr_hw_queues = prev_nr_hw_queues;
    4541           0 :                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
    4542           0 :                         goto fallback;
    4543             :                 }
    4544           0 :                 blk_mq_map_swqueue(q);
    4545             :         }
    4546             : 
    4547             : reregister:
    4548           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list) {
    4549           0 :                 blk_mq_sysfs_register(q);
    4550           0 :                 blk_mq_debugfs_register_hctxs(q);
    4551             :         }
    4552             : 
    4553             : switch_back:
    4554           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4555           0 :                 blk_mq_elv_switch_back(&head, q);
    4556             : 
    4557           0 :         list_for_each_entry(q, &set->tag_list, tag_set_list)
    4558           0 :                 blk_mq_unfreeze_queue(q);
    4559             : }
    4560             : 
    4561           0 : void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
    4562             : {
    4563           0 :         mutex_lock(&set->tag_list_lock);
    4564           0 :         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
    4565           0 :         mutex_unlock(&set->tag_list_lock);
    4566           0 : }
    4567             : EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
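
Finally, a sketch of how a driver might request a new hardware queue count after reconfiguring its device; the count is illustrative, and as the code above shows it is clamped to nr_cpu_ids for single-map sets and falls back to the previous count if reallocation fails:

    static void my_driver_reconfigure(struct my_dev *dev, unsigned int nr_queues)
    {
            /* freezes every queue sharing the set while the maps are rebuilt */
            blk_mq_update_nr_hw_queues(&dev->tag_set, nr_queues);
    }
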
    4568             : 
    4569             : /* Enable polling stats and return whether they were already enabled. */
    4570             : static bool blk_poll_stats_enable(struct request_queue *q)
    4571             : {
    4572           0 :         if (q->poll_stat)
    4573             :                 return true;
    4574             : 
    4575           0 :         return blk_stats_alloc_enable(q);
    4576             : }
    4577             : 
    4578           0 : static void blk_mq_poll_stats_start(struct request_queue *q)
    4579             : {
    4580             :         /*
    4581             :          * We don't arm the callback if polling stats are not enabled or the
    4582             :          * callback is already active.
    4583             :          */
    4584           0 :         if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
    4585             :                 return;
    4586             : 
    4587           0 :         blk_stat_activate_msecs(q->poll_cb, 100);
    4588             : }
    4589             : 
    4590           0 : static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
    4591             : {
    4592           0 :         struct request_queue *q = cb->data;
    4593             :         int bucket;
    4594             : 
    4595           0 :         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
    4596           0 :                 if (cb->stat[bucket].nr_samples)
    4597           0 :                         q->poll_stat[bucket] = cb->stat[bucket];
    4598             :         }
    4599           0 : }
    4600             : 
    4601           0 : static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
    4602             :                                        struct request *rq)
    4603             : {
    4604           0 :         unsigned long ret = 0;
    4605             :         int bucket;
    4606             : 
    4607             :         /*
    4608             :          * If stats collection isn't on, don't sleep but turn it on for
    4609             :          * future users
    4610             :          */
    4611           0 :         if (!blk_poll_stats_enable(q))
    4612             :                 return 0;
    4613             : 
    4614             :         /*
    4615             :          * As an optimistic guess, use half of the mean service time
    4616             :          * for this type of request. We can (and should) make this smarter.
    4617             :          * For instance, if the completion latencies are tight, we can
    4618             :          * get closer than just half the mean. This is especially
    4619             :          * important on devices where the completion latencies are longer
    4620             :          * than ~10 usec. We do use the stats for the relevant IO size
    4621             :          * if available which does lead to better estimates.
    4622             :          */
    4623           0 :         bucket = blk_mq_poll_stats_bkt(rq);
    4624           0 :         if (bucket < 0)
    4625             :                 return ret;
    4626             : 
    4627           0 :         if (q->poll_stat[bucket].nr_samples)
    4628           0 :                 ret = (q->poll_stat[bucket].mean + 1) / 2;
    4629             : 
    4630             :         return ret;
    4631             : }
    4632             : 
    4633           0 : static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
    4634             : {
    4635           0 :         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
    4636           0 :         struct request *rq = blk_qc_to_rq(hctx, qc);
    4637             :         struct hrtimer_sleeper hs;
    4638             :         enum hrtimer_mode mode;
    4639             :         unsigned int nsecs;
    4640             :         ktime_t kt;
    4641             : 
    4642             :         /*
     4643             :          * If a request has completed on a queue that uses an I/O scheduler, we
    4644             :          * won't get back a request from blk_qc_to_rq.
    4645             :          */
    4646           0 :         if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
    4647             :                 return false;
    4648             : 
    4649             :         /*
    4650             :          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
    4651             :          *
     4652             :          *  0: use half of prev avg
     4653             :          * >0: use this specific value
    4654             :          */
    4655           0 :         if (q->poll_nsec > 0)
    4656           0 :                 nsecs = q->poll_nsec;
    4657             :         else
    4658           0 :                 nsecs = blk_mq_poll_nsecs(q, rq);
    4659             : 
    4660           0 :         if (!nsecs)
    4661             :                 return false;
    4662             : 
    4663           0 :         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
    4664             : 
    4665             :         /*
    4666             :          * This will be replaced with the stats tracking code, using
    4667             :          * 'avg_completion_time / 2' as the pre-sleep target.
    4668             :          */
    4669           0 :         kt = nsecs;
    4670             : 
    4671           0 :         mode = HRTIMER_MODE_REL;
    4672           0 :         hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
    4673           0 :         hrtimer_set_expires(&hs.timer, kt);
    4674             : 
    4675             :         do {
    4676           0 :                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
    4677             :                         break;
    4678           0 :                 set_current_state(TASK_UNINTERRUPTIBLE);
    4679           0 :                 hrtimer_sleeper_start_expires(&hs, mode);
    4680           0 :                 if (hs.task)
    4681           0 :                         io_schedule();
    4682           0 :                 hrtimer_cancel(&hs.timer);
    4683           0 :                 mode = HRTIMER_MODE_ABS;
    4684           0 :         } while (hs.task && !signal_pending(current));
    4685             : 
    4686           0 :         __set_current_state(TASK_RUNNING);
    4687           0 :         destroy_hrtimer_on_stack(&hs.timer);
    4688             : 
    4689             :         /*
    4690             :          * If we sleep, have the caller restart the poll loop to reset the
    4691             :          * state.  Like for the other success return cases, the caller is
    4692             :          * responsible for checking if the IO completed.  If the IO isn't
    4693             :          * complete, we'll get called again and will go straight to the busy
    4694             :          * poll loop.
    4695             :          */
    4696           0 :         return true;
    4697             : }
    4698             : 
    4699           0 : static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
    4700             :                                struct io_comp_batch *iob, unsigned int flags)
    4701             : {
    4702           0 :         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
    4703           0 :         long state = get_current_state();
    4704             :         int ret;
    4705             : 
    4706             :         do {
    4707           0 :                 ret = q->mq_ops->poll(hctx, iob);
    4708           0 :                 if (ret > 0) {
    4709           0 :                         __set_current_state(TASK_RUNNING);
    4710           0 :                         return ret;
    4711             :                 }
    4712             : 
    4713           0 :                 if (signal_pending_state(state, current))
    4714           0 :                         __set_current_state(TASK_RUNNING);
    4715           0 :                 if (task_is_running(current))
    4716             :                         return 1;
    4717             : 
    4718           0 :                 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
    4719             :                         break;
    4720             :                 cpu_relax();
    4721           0 :         } while (!need_resched());
    4722             : 
    4723           0 :         __set_current_state(TASK_RUNNING);
    4724           0 :         return 0;
    4725             : }
    4726             : 
    4727           0 : int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
    4728             :                 unsigned int flags)
    4729             : {
    4730           0 :         if (!(flags & BLK_POLL_NOSLEEP) &&
    4731           0 :             q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
    4732           0 :                 if (blk_mq_poll_hybrid(q, cookie))
    4733             :                         return 1;
    4734             :         }
    4735           0 :         return blk_mq_poll_classic(q, cookie, iob, flags);
    4736             : }
    4737             : 
    4738           0 : unsigned int blk_mq_rq_cpu(struct request *rq)
    4739             : {
    4740           0 :         return rq->mq_ctx->cpu;
    4741             : }
    4742             : EXPORT_SYMBOL(blk_mq_rq_cpu);
    4743             : 
    4744           0 : void blk_mq_cancel_work_sync(struct request_queue *q)
    4745             : {
    4746           0 :         if (queue_is_mq(q)) {
    4747             :                 struct blk_mq_hw_ctx *hctx;
    4748             :                 unsigned long i;
    4749             : 
    4750           0 :                 cancel_delayed_work_sync(&q->requeue_work);
    4751             : 
    4752           0 :                 queue_for_each_hw_ctx(q, hctx, i)
    4753           0 :                         cancel_delayed_work_sync(&hctx->run_work);
    4754             :         }
    4755           0 : }
    4756             : 
    4757           1 : static int __init blk_mq_init(void)
    4758             : {
    4759             :         int i;
    4760             : 
    4761           2 :         for_each_possible_cpu(i)
    4762           2 :                 init_llist_head(&per_cpu(blk_cpu_done, i));
    4763           1 :         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
    4764             : 
    4765           1 :         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
    4766             :                                   "block/softirq:dead", NULL,
    4767             :                                   blk_softirq_cpu_dead);
    4768           1 :         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
    4769             :                                 blk_mq_hctx_notify_dead);
    4770           1 :         cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
    4771             :                                 blk_mq_hctx_notify_online,
    4772             :                                 blk_mq_hctx_notify_offline);
    4773           1 :         return 0;
    4774             : }
    4775             : subsys_initcall(blk_mq_init);

Generated by: LCOV version 1.14