// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart, so that a later
 * __blk_mq_sched_restart() re-runs it once dispatch resources free up.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

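/* Clear the restart flag set by blk_mq_sched_mark_restart_hctx() and re-run the queue. */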
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        /*
         * Order clearing SCHED_RESTART against the list_empty_careful()
         * check of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is
         * the barrier in blk_mq_dispatch_rq_list(): without it, dispatch
         * code might not see SCHED_RESTART while blk_mq_run_hw_queue()
         * misses a new request that was just added to hctx->dispatch.
         */
        smp_mb();

        blk_mq_run_hw_queue(hctx, true);
}

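/* list_sort() comparator: order requests so those sharing an hctx are adjacent. */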
static int sched_rq_cmp(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return rqa->mq_hctx > rqb->mq_hctx;
}

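/*
 * Dispatch the leading run of requests on @rq_list that share the first
 * request's hardware queue, leaving the rest on the list. Returns true if
 * blk_mq_dispatch_rq_list() made progress.
 */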
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
        struct blk_mq_hw_ctx *hctx =
                list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
        struct request *rq;
        LIST_HEAD(hctx_list);
        unsigned int count = 0;

        list_for_each_entry(rq, rq_list, queuelist) {
                if (rq->mq_hctx != hctx) {
                        list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                        goto dispatch;
                }
                count++;
        }
        list_splice_tail_init(rq_list, &hctx_list);

dispatch:
        return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY     3               /* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        bool multi_hctxs = false, run_queue = false;
        bool dispatched = false, busy = false;
        unsigned int max_dispatch;
        LIST_HEAD(rq_list);
        int count = 0;

        if (hctx->dispatch_busy)
                max_dispatch = 1;
        else
                max_dispatch = hctx->queue->nr_requests;

        do {
                struct request *rq;
                int budget_token;

                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;

                if (!list_empty_careful(&hctx->dispatch)) {
                        busy = true;
                        break;
                }

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked other hctxs on the same
                         * queue, and since we didn't dispatch there is no
                         * guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        run_queue = true;
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget, which has to be released
                 * if this rq won't be queued to the driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add_tail(&rq->queuelist, &rq_list);
                count++;
                if (rq->mq_hctx != hctx)
                        multi_hctxs = true;

                /*
                 * If we cannot get a tag for the request, stop dequeueing
                 * requests from the IO scheduler. We are unlikely to be able
                 * to submit them anyway, and it creates a false impression
                 * for scheduling heuristics that the device can take more IO.
                 */
                if (!blk_mq_get_driver_tag(rq))
                        break;
        } while (count < max_dispatch);

        if (!count) {
                if (run_queue)
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
        } else if (multi_hctxs) {
                /*
                 * Some schedulers, such as bfq and deadline, may return
                 * requests belonging to different hctxs from one dispatch
                 * hook.
                 *
                 * Sort the requests in the list according to their hctx,
                 * so batches of requests from the same hctx can be
                 * dispatched at a time.
                 */
                list_sort(NULL, &rq_list, sched_rq_cmp);
                do {
                        dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
                } while (!list_empty(&rq_list));
        } else {
                dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
        }

        if (busy)
                return -EAGAIN;
        return !!dispatched;
}

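/*
 * Run __blk_mq_do_dispatch_sched() repeatedly while it keeps dispatching.
 * Bail out to an async queue run if we need to reschedule or have been at
 * it for about a second (HZ jiffies), so one queue cannot hog the CPU.
 */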
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        unsigned long end = jiffies + HZ;
        int ret;

        do {
                ret = __blk_mq_do_dispatch_sched(hctx);
                if (ret != 1)
                        break;
                if (need_resched() || time_is_before_jiffies(end)) {
                        blk_mq_delay_run_hw_queue(hctx, 0);
                        break;
                }
        } while (1);

        return ret;
}

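/* Return the software queue that follows @ctx in @hctx, wrapping around. */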
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx)
{
        unsigned short idx = ctx->index_hw[hctx->type];

        if (++idx == hctx->nr_ctx)
                idx = 0;

        return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        LIST_HEAD(rq_list);
        struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
        int ret = 0;
        struct request *rq;

        do {
                int budget_token;

                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                }

                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked other hctxs on the same
                         * queue, and since we didn't dispatch there is no
                         * guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget, which has to be released
                 * if this rq won't be queued to the driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add(&rq->queuelist, &rq_list);

                /* round robin for fair dispatch */
                ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

        } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

        WRITE_ONCE(hctx->dispatch_from, ctx);
        return ret;
}

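/*
 * One dispatch pass: drain leftovers on hctx->dispatch first, then pull
 * fresh work from the I/O scheduler or the software queues.
 */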
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        const bool has_sched = q->elevator;
        int ret = 0;
        LIST_HEAD(rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them first
         * for more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests if we didn't have residual
         * requests from the dispatch list. This is to avoid the case where
         * we only ever dispatch a fraction of the requests available because
         * of low device queue depth. Once we pull requests out of the IO
         * scheduler, we can no longer merge or sort them. So it's best to
         * leave them there for as long as we can. Mark the hw queue as
         * needing a restart in that case.
         *
         * We want to dispatch from the scheduler if there was nothing
         * on the dispatch list or we were able to dispatch from the
         * dispatch list.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
                        if (has_sched)
                                ret = blk_mq_do_dispatch_sched(hctx);
                        else
                                ret = blk_mq_do_dispatch_ctx(hctx);
                }
        } else if (has_sched) {
                ret = blk_mq_do_dispatch_sched(hctx);
        } else if (hctx->dispatch_busy) {
                /* dequeue requests one by one from the sw queue if the queue is busy */
                ret = blk_mq_do_dispatch_ctx(hctx);
        } else {
                blk_mq_flush_busy_ctxs(hctx, &rq_list);
                blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
        }

        return ret;
}

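/*
 * Run a dispatch pass on @hctx, retrying once on -EAGAIN and punting to an
 * async queue run if hctx->dispatch is still non-empty, so flushes are not
 * starved.
 */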
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;

        hctx->run++;

        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
         */
        if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                        blk_mq_run_hw_queue(hctx, true);
        }
}

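/*
 * Try to merge @bio into a pending request, either via the elevator's
 * ->bio_merge() hook or, if there is none, against the software queue the
 * bio maps to. Returns true on a successful merge.
 */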
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        bool ret = false;
        enum hctx_type type;

        if (e && e->type->ops.bio_merge) {
                ret = e->type->ops.bio_merge(q, bio, nr_segs);
                goto out_put;
        }

        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        type = hctx->type;
        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
            list_empty_careful(&ctx->rq_lists[type]))
                goto out_put;

        /* default per sw-queue merge */
        spin_lock(&ctx->lock);
        /*
         * Walk our software queue in reverse, looking for entries that the
         * bio could merge with. A hand-wavy stop count of 8 keeps us from
         * spending too much time checking for merges.
         */
        if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
                ret = true;

        spin_unlock(&ctx->lock);
out_put:
        return ret;
}

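/* Attempt an insertion merge of @rq; requests freed by merging go on @free. */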
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
                                   struct list_head *free)
{
        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        /*
         * Dispatch flush and passthrough requests directly.
         *
         * A passthrough request has to be added to hctx->dispatch directly:
         * a device may get into a state where it cannot handle FS requests,
         * so BLK_STS_RESOURCE keeps being returned and FS requests pile up
         * on hctx->dispatch. A passthrough request may then be required to
         * fix the problem, and if it were added to the scheduler queue it
         * would never get a chance to run, since requests on hctx->dispatch
         * are dispatched first.
         */
        if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;

        return false;
}

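/*
 * Insert @rq for dispatch. Flush and passthrough requests bypass the
 * scheduler (see blk_mq_sched_bypass_insert()); everything else goes to
 * the elevator or, without one, to the software queue.
 */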
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

        if (blk_mq_sched_bypass_insert(hctx, rq)) {
                /*
                 * Firstly, normal IO requests are inserted to the scheduler
                 * queue or sw queue, while flush requests are added to the
                 * dispatch queue (hctx->dispatch) directly. Since there is
                 * at most one in-flight flush request per hw queue,
                 * correctness does not depend on whether the flush request
                 * goes to the tail or the front of the dispatch queue.
                 *
                 * Secondly, in the NCQ case a flush request is a non-NCQ
                 * command, and queueing it will fail while any normal IO
                 * request (NCQ command) is in flight. Adding the flush rq to
                 * the front of hctx->dispatch tends to add a little latency
                 * to that flush rq (because of S_SCHED_RESTART) compared
                 * with adding it to the tail, which increases the chance of
                 * flush merging, so fewer flush requests are issued to the
                 * controller. It is observed that ~10% time is saved in
                 * blktests block/004 on a disk attached to an AHCI/NCQ drive
                 * when adding the flush rq to the front of hctx->dispatch.
                 *
                 * So simply queue flush rqs to the front of hctx->dispatch,
                 * letting flush-intensive workloads benefit on NCQ hardware.
                 */
                at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }

        if (e) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

run:
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

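/*
 * Insert a batch of requests from plug flushing: hand them to the elevator
 * if one is set, otherwise try direct issue and fall back to the sw queue.
 */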
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async)
{
        struct elevator_queue *e;
        struct request_queue *q = hctx->queue;

        /*
         * blk_mq_sched_insert_requests() is called from flush plug
         * context only, so take a usage counter reference to prevent the
         * queue from being released underneath us.
         */
        percpu_ref_get(&q->q_usage_counter);

        e = hctx->queue->elevator;
        if (e) {
                e->type->ops.insert_requests(hctx, list, false);
        } else {
                /*
                 * Try to issue requests directly if the hw queue isn't busy
                 * in case of the 'none' scheduler; this may save us an extra
                 * enqueue & dequeue to/from the sw queue.
                 */
                if (!hctx->dispatch_busy && !run_queue_async) {
                        blk_mq_run_dispatch_ops(hctx->queue,
                                blk_mq_try_issue_list_directly(hctx, list));
                        if (list_empty(list))
                                goto out;
                }
                blk_mq_insert_requests(hctx, ctx, list);
        }

        blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
        percpu_ref_put(&q->q_usage_counter);
}

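/*
 * Allocate scheduler tags and requests for one hardware queue, or reuse
 * the queue-wide set when tags are shared across hardware queues.
 */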
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
                                          struct blk_mq_hw_ctx *hctx,
                                          unsigned int hctx_idx)
{
        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                hctx->sched_tags = q->sched_shared_tags;
                return 0;
        }

        hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
                                                    q->nr_requests);

        if (!hctx->sched_tags)
                return -ENOMEM;
        return 0;
}

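/* Free the queue-wide shared scheduler tag map. */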
static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
        blk_mq_free_rq_map(queue->sched_shared_tags);
        queue->sched_shared_tags = NULL;
}

/* Called from the queue's release handler; the tagset has gone away. */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags) {
                        if (!blk_mq_is_shared_tags(flags))
                                blk_mq_free_rq_map(hctx->sched_tags);
                        hctx->sched_tags = NULL;
                }
        }

        if (blk_mq_is_shared_tags(flags))
                blk_mq_exit_sched_shared_tags(q);
}

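/* Allocate the queue-wide shared scheduler tags, sized at maximum depth. */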
static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
        struct blk_mq_tag_set *set = queue->tag_set;

        /*
         * Set the initial depth to the maximum so that we don't need to
         * reallocate when updating nr_requests.
         */
        queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
                                                BLK_MQ_NO_HCTX_IDX,
                                                MAX_SCHED_RQ);
        if (!queue->sched_shared_tags)
                return -ENOMEM;

        blk_mq_tag_update_sched_shared_tags(queue);

        return 0;
}

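/*
 * Attach elevator @e to @q: size nr_requests, allocate scheduler tags and
 * requests, and run the elevator's init hooks. A NULL @e selects "none".
 */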
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
        unsigned int flags = q->tag_set->flags;
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned long i;
        int ret;

        if (!e) {
                q->elevator = NULL;
                q->nr_requests = q->tag_set->queue_depth;
                return 0;
        }

        /*
         * Default to twice the smaller of the hw queue depth and 128
         * (BLKDEV_DEFAULT_RQ), since we don't split into sync/async like
         * the old code did. Note that this is a per-hw-queue depth.
         */
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_DEFAULT_RQ);

        if (blk_mq_is_shared_tags(flags)) {
                ret = blk_mq_init_sched_shared_tags(q);
                if (ret)
                        return ret;
        }

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
                if (ret)
                        goto err_free_map_and_rqs;
        }

        ret = e->ops.init_sched(q, e);
        if (ret)
                goto err_free_map_and_rqs;

        blk_mq_debugfs_register_sched(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                eq = q->elevator;
                                blk_mq_sched_free_rqs(q);
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        return 0;

err_free_map_and_rqs:
        blk_mq_sched_free_rqs(q);
        blk_mq_sched_tags_teardown(q, flags);

        q->elevator = NULL;
        return ret;
}

/*
 * Called from either blk_queue_cleanup or elevator_switch; the tagset is
 * required for freeing the requests.
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
                                BLK_MQ_NO_HCTX_IDX);
        } else {
                queue_for_each_hw_ctx(q, hctx, i) {
                        if (hctx->sched_tags)
                                blk_mq_free_rqs(q->tag_set,
                                                hctx->sched_tags, i);
                }
        }
}

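/* Tear down elevator @e on @q, undoing blk_mq_init_sched(). */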
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;
        unsigned int flags = 0;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
                flags = hctx->flags;
        }
        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q, flags);
        q->elevator = NULL;
}
