Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
4 : * for the blk-mq scheduling framework
5 : *
6 : * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
7 : */
8 : #include <linux/kernel.h>
9 : #include <linux/fs.h>
10 : #include <linux/blkdev.h>
11 : #include <linux/blk-mq.h>
12 : #include <linux/bio.h>
13 : #include <linux/module.h>
14 : #include <linux/slab.h>
15 : #include <linux/init.h>
16 : #include <linux/compiler.h>
17 : #include <linux/rbtree.h>
18 : #include <linux/sbitmap.h>
19 :
20 : #include <trace/events/block.h>
21 :
22 : #include "elevator.h"
23 : #include "blk.h"
24 : #include "blk-mq.h"
25 : #include "blk-mq-debugfs.h"
26 : #include "blk-mq-tag.h"
27 : #include "blk-mq-sched.h"
28 :
29 : /*
30 : * See Documentation/block/deadline-iosched.rst
31 : */
32 : static const int read_expire = HZ / 2; /* max time before a read is submitted. */
33 : static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
34 : /*
35 : * Time after which to dispatch lower priority requests even if higher
36 : * priority requests are pending.
37 : */
38 : static const int prio_aging_expire = 10 * HZ;
39 : static const int writes_starved = 2; /* max times reads can starve a write */
40 : static const int fifo_batch = 16; /* # of sequential requests treated as one
41 : by the above parameters. For throughput. */
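/*
 * Illustrative sketch, not part of this scheduler: the expiry limits above are
 * expressed in jiffies, so HZ/2 is 500 ms and 5*HZ is 5 s regardless of
 * CONFIG_HZ. Standalone userspace arithmetic; the HZ value of 250 is chosen
 * purely as an example.
 */
#include <assert.h>

#define EXAMPLE_HZ 250

static unsigned int example_jiffies_to_msecs(unsigned int j)
{
	return j * (1000 / EXAMPLE_HZ);	/* exact when EXAMPLE_HZ divides 1000 */
}

int main(void)
{
	assert(example_jiffies_to_msecs(EXAMPLE_HZ / 2) == 500);	/* read_expire */
	assert(example_jiffies_to_msecs(5 * EXAMPLE_HZ) == 5000);	/* write_expire */
	return 0;
}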
42 :
43 : enum dd_data_dir {
44 : DD_READ = READ,
45 : DD_WRITE = WRITE,
46 : };
47 :
48 : enum { DD_DIR_COUNT = 2 };
49 :
50 : enum dd_prio {
51 : DD_RT_PRIO = 0,
52 : DD_BE_PRIO = 1,
53 : DD_IDLE_PRIO = 2,
54 : DD_PRIO_MAX = 2,
55 : };
56 :
57 : enum { DD_PRIO_COUNT = 3 };
58 :
59 : /*
60 : * I/O statistics per I/O priority. It is fine if these counters overflow.
61 : * What matters is that these counters are at least as wide as
62 : * log2(max_outstanding_requests).
63 : */
64 : struct io_stats_per_prio {
65 : uint32_t inserted;
66 : uint32_t merged;
67 : uint32_t dispatched;
68 : atomic_t completed;
69 : };
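/*
 * Illustrative sketch, not part of this scheduler: because the subtraction in
 * dd_queued() below is done in unsigned arithmetic, these counters may wrap
 * without corrupting the outstanding-request count, as long as the number of
 * outstanding requests fits in the counter width. Standalone userspace check:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t outstanding(uint32_t inserted, uint32_t completed)
{
	return inserted - completed;	/* well defined modulo 2^32 */
}

int main(void)
{
	/* "inserted" has wrapped past UINT32_MAX while 3 requests are in flight */
	assert(outstanding(1, UINT32_MAX - 1) == 3);
	assert(outstanding(100, 100) == 0);
	return 0;
}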
70 :
71 : /*
72 : * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
73 : * present on both sort_list[] and fifo_list[].
74 : */
75 : struct dd_per_prio {
76 : struct list_head dispatch;
77 : struct rb_root sort_list[DD_DIR_COUNT];
78 : struct list_head fifo_list[DD_DIR_COUNT];
79 : /* Next request in sector order. Read, write or both may be NULL. */
80 : struct request *next_rq[DD_DIR_COUNT];
81 : struct io_stats_per_prio stats;
82 : };
83 :
84 : struct deadline_data {
85 : /*
86 : * run time data
87 : */
88 :
89 : struct dd_per_prio per_prio[DD_PRIO_COUNT];
90 :
91 : /* Data direction of latest dispatched request. */
92 : enum dd_data_dir last_dir;
93 : unsigned int batching; /* number of sequential requests made */
94 : unsigned int starved; /* times reads have starved writes */
95 :
96 : /*
97 : * settings that change how the i/o scheduler behaves
98 : */
99 : int fifo_expire[DD_DIR_COUNT];
100 : int fifo_batch;
101 : int writes_starved;
102 : int front_merges;
103 : u32 async_depth;
104 : int prio_aging_expire;
105 :
106 : spinlock_t lock;
107 : spinlock_t zone_lock;
108 : };
109 :
110 : /* Maps an I/O priority class to a deadline scheduler priority. */
111 : static const enum dd_prio ioprio_class_to_prio[] = {
112 : [IOPRIO_CLASS_NONE] = DD_BE_PRIO,
113 : [IOPRIO_CLASS_RT] = DD_RT_PRIO,
114 : [IOPRIO_CLASS_BE] = DD_BE_PRIO,
115 : [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO,
116 : };
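/*
 * Illustrative sketch, not part of this scheduler: the I/O priority class
 * lives in the top bits of the ioprio value (IOPRIO_CLASS_SHIFT is 13 in
 * include/uapi/linux/ioprio.h as far as I recall; verify against your tree),
 * so the table above is indexed as in this standalone example. All EX_*
 * names are hypothetical stand-ins for the kernel definitions.
 */
#include <assert.h>

enum { EX_CLASS_NONE, EX_CLASS_RT, EX_CLASS_BE, EX_CLASS_IDLE };
enum { EX_RT_PRIO, EX_BE_PRIO, EX_IDLE_PRIO };

#define EX_IOPRIO_CLASS_SHIFT	13
#define EX_IOPRIO_PRIO_CLASS(v)	((v) >> EX_IOPRIO_CLASS_SHIFT)

static const int ex_class_to_prio[] = {
	[EX_CLASS_NONE]	= EX_BE_PRIO,
	[EX_CLASS_RT]	= EX_RT_PRIO,
	[EX_CLASS_BE]	= EX_BE_PRIO,
	[EX_CLASS_IDLE]	= EX_IDLE_PRIO,
};

int main(void)
{
	unsigned int ioprio = EX_CLASS_RT << EX_IOPRIO_CLASS_SHIFT;	/* RT, level 0 */

	assert(ex_class_to_prio[EX_IOPRIO_PRIO_CLASS(ioprio)] == EX_RT_PRIO);
	return 0;
}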
117 :
118 : static inline struct rb_root *
119 : deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
120 : {
121 0 : return &per_prio->sort_list[rq_data_dir(rq)];
122 : }
123 :
124 : /*
125 : * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
126 : * request.
127 : */
128 : static u8 dd_rq_ioclass(struct request *rq)
129 : {
130 0 : return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
131 : }
132 :
133 : /*
134 : * get the request after `rq' in sector-sorted order
135 : */
136 : static inline struct request *
137 : deadline_latter_request(struct request *rq)
138 : {
139 0 : struct rb_node *node = rb_next(&rq->rb_node);
140 :
141 0 : if (node)
142 0 : return rb_entry_rq(node);
143 :
144 : return NULL;
145 : }
146 :
147 : static void
148 : deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
149 : {
150 0 : struct rb_root *root = deadline_rb_root(per_prio, rq);
151 :
152 0 : elv_rb_add(root, rq);
153 : }
154 :
155 : static inline void
156 0 : deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
157 : {
158 0 : const enum dd_data_dir data_dir = rq_data_dir(rq);
159 :
160 0 : if (per_prio->next_rq[data_dir] == rq)
161 0 : per_prio->next_rq[data_dir] = deadline_latter_request(rq);
162 :
163 0 : elv_rb_del(deadline_rb_root(per_prio, rq), rq);
164 0 : }
165 :
166 : /*
167 : * remove rq from rbtree and fifo.
168 : */
169 0 : static void deadline_remove_request(struct request_queue *q,
170 : struct dd_per_prio *per_prio,
171 : struct request *rq)
172 : {
173 0 : list_del_init(&rq->queuelist);
174 :
175 : /*
176 : * We might not be on the rbtree, if we are doing an insert merge
177 : */
178 0 : if (!RB_EMPTY_NODE(&rq->rb_node))
179 0 : deadline_del_rq_rb(per_prio, rq);
180 :
181 0 : elv_rqhash_del(q, rq);
182 0 : if (q->last_merge == rq)
183 0 : q->last_merge = NULL;
184 0 : }
185 :
186 0 : static void dd_request_merged(struct request_queue *q, struct request *req,
187 : enum elv_merge type)
188 : {
189 0 : struct deadline_data *dd = q->elevator->elevator_data;
190 0 : const u8 ioprio_class = dd_rq_ioclass(req);
191 0 : const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
192 0 : struct dd_per_prio *per_prio = &dd->per_prio[prio];
193 :
194 : /*
195 : * if the merge was a front merge, we need to reposition request
196 : */
197 0 : if (type == ELEVATOR_FRONT_MERGE) {
198 0 : elv_rb_del(deadline_rb_root(per_prio, req), req);
199 : deadline_add_rq_rb(per_prio, req);
200 : }
201 0 : }
202 :
203 : /*
204 : * Callback function that is invoked after @next has been merged into @req.
205 : */
206 0 : static void dd_merged_requests(struct request_queue *q, struct request *req,
207 : struct request *next)
208 : {
209 0 : struct deadline_data *dd = q->elevator->elevator_data;
210 0 : const u8 ioprio_class = dd_rq_ioclass(next);
211 0 : const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
212 :
213 : lockdep_assert_held(&dd->lock);
214 :
215 0 : dd->per_prio[prio].stats.merged++;
216 :
217 : /*
218 : * if next expires before rq, assign its expire time to rq
219 : * and move into next position (next will be deleted) in fifo
220 : */
221 0 : if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
222 0 : if (time_before((unsigned long)next->fifo_time,
223 : (unsigned long)req->fifo_time)) {
224 0 : list_move(&req->queuelist, &next->queuelist);
225 0 : req->fifo_time = next->fifo_time;
226 : }
227 : }
228 :
229 : /*
230 : * kill knowledge of next, this one is a goner
231 : */
232 0 : deadline_remove_request(q, &dd->per_prio[prio], next);
233 0 : }
234 :
235 : /*
236 : * move an entry to dispatch queue
237 : */
238 : static void
239 0 : deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
240 : struct request *rq)
241 : {
242 0 : const enum dd_data_dir data_dir = rq_data_dir(rq);
243 :
244 0 : per_prio->next_rq[data_dir] = deadline_latter_request(rq);
245 :
246 : /*
247 : * take it off the sort and fifo list
248 : */
249 0 : deadline_remove_request(rq->q, per_prio, rq);
250 0 : }
251 :
252 : /* Number of requests queued for a given priority level. */
253 : static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
254 : {
255 0 : const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
256 :
257 : lockdep_assert_held(&dd->lock);
258 :
259 0 : return stats->inserted - atomic_read(&stats->completed);
260 : }
261 :
262 : /*
263 : * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
264 : * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
265 : */
266 : static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
267 : enum dd_data_dir data_dir)
268 : {
269 0 : struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
270 :
271 : /*
272 : * rq is expired!
273 : */
274 0 : if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
275 : return 1;
276 :
277 : return 0;
278 : }
279 :
280 : /*
281 : * For the specified data direction, return the next request to
282 : * dispatch using arrival ordered lists.
283 : */
284 : static struct request *
285 : deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
286 : enum dd_data_dir data_dir)
287 : {
288 : struct request *rq;
289 : unsigned long flags;
290 :
291 0 : if (list_empty(&per_prio->fifo_list[data_dir]))
292 : return NULL;
293 :
294 0 : rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
295 : if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
296 : return rq;
297 :
298 : /*
299 : * Look for a write request that can be dispatched, that is one with
300 : * an unlocked target zone.
301 : */
302 : spin_lock_irqsave(&dd->zone_lock, flags);
303 : list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
304 : if (blk_req_can_dispatch_to_zone(rq))
305 : goto out;
306 : }
307 : rq = NULL;
308 : out:
309 : spin_unlock_irqrestore(&dd->zone_lock, flags);
310 :
311 : return rq;
312 : }
313 :
314 : /*
315 : * For the specified data direction, return the next request to
316 : * dispatch using sector position sorted lists.
317 : */
318 : static struct request *
319 : deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
320 : enum dd_data_dir data_dir)
321 : {
322 : struct request *rq;
323 : unsigned long flags;
324 :
325 0 : rq = per_prio->next_rq[data_dir];
326 0 : if (!rq)
327 : return NULL;
328 :
329 : if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
330 : return rq;
331 :
332 : /*
333 : * Look for a write request that can be dispatched, that is one with
334 : * an unlocked target zone.
335 : */
336 : spin_lock_irqsave(&dd->zone_lock, flags);
337 : while (rq) {
338 : if (blk_req_can_dispatch_to_zone(rq))
339 : break;
340 : rq = deadline_latter_request(rq);
341 : }
342 : spin_unlock_irqrestore(&dd->zone_lock, flags);
343 :
344 : return rq;
345 : }
346 :
347 : /*
348 : * Returns true if and only if @rq started after @latest_start where
349 : * @latest_start is in jiffies.
350 : */
351 : static bool started_after(struct deadline_data *dd, struct request *rq,
352 : unsigned long latest_start)
353 : {
354 0 : unsigned long start_time = (unsigned long)rq->fifo_time;
355 :
356 0 : start_time -= dd->fifo_expire[rq_data_dir(rq)];
357 :
358 0 : return time_after(start_time, latest_start);
359 : }
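/*
 * Illustrative sketch, not part of this scheduler: dd_insert_request() below
 * stores rq->fifo_time = jiffies + fifo_expire, so subtracting fifo_expire
 * recovers the insertion time that is compared against @latest_start.
 * Standalone userspace version with plain integers standing in for jiffies:
 */
#include <assert.h>
#include <stdbool.h>

static bool example_started_after(unsigned long fifo_time,
				  unsigned long fifo_expire,
				  unsigned long latest_start)
{
	unsigned long start_time = fifo_time - fifo_expire;

	return (long)(start_time - latest_start) > 0;	/* open-coded time_after() */
}

int main(void)
{
	/* inserted at t=1000 with a 250-jiffy expiry -> fifo_time == 1250 */
	assert(example_started_after(1250, 250, 900));		/* 1000 is after 900 */
	assert(!example_started_after(1250, 250, 1000));	/* but not after 1000 */
	return 0;
}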
360 :
361 : /*
362 : * __dd_dispatch_request selects the best request according to
363 : * read/write expire, fifo_batch, etc., and with a start time <= @latest_start.
364 : */
365 0 : static struct request *__dd_dispatch_request(struct deadline_data *dd,
366 : struct dd_per_prio *per_prio,
367 : unsigned long latest_start)
368 : {
369 : struct request *rq, *next_rq;
370 : enum dd_data_dir data_dir;
371 : enum dd_prio prio;
372 : u8 ioprio_class;
373 :
374 : lockdep_assert_held(&dd->lock);
375 :
376 0 : if (!list_empty(&per_prio->dispatch)) {
377 0 : rq = list_first_entry(&per_prio->dispatch, struct request,
378 : queuelist);
379 0 : if (started_after(dd, rq, latest_start))
380 : return NULL;
381 0 : list_del_init(&rq->queuelist);
382 : goto done;
383 : }
384 :
385 : /*
386 : * batches are currently reads XOR writes
387 : */
388 0 : rq = deadline_next_request(dd, per_prio, dd->last_dir);
389 0 : if (rq && dd->batching < dd->fifo_batch)
390 : /* we have a next request and are still entitled to batch */
391 : goto dispatch_request;
392 :
393 : /*
394 : * at this point we are not running a batch. select the appropriate
395 : * data direction (read / write)
396 : */
397 :
398 0 : if (!list_empty(&per_prio->fifo_list[DD_READ])) {
399 0 : BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
400 :
401 0 : if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
402 0 : (dd->starved++ >= dd->writes_starved))
403 : goto dispatch_writes;
404 :
405 : data_dir = DD_READ;
406 :
407 : goto dispatch_find_request;
408 : }
409 :
410 : /*
411 : * there are either no reads or writes have been starved
412 : */
413 :
414 0 : if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
415 : dispatch_writes:
416 0 : BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
417 :
418 0 : dd->starved = 0;
419 :
420 0 : data_dir = DD_WRITE;
421 :
422 0 : goto dispatch_find_request;
423 : }
424 :
425 : return NULL;
426 :
427 : dispatch_find_request:
428 : /*
429 : * we are not running a batch, find best request for selected data_dir
430 : */
431 0 : next_rq = deadline_next_request(dd, per_prio, data_dir);
432 0 : if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
433 : /*
434 : * A deadline has expired, the last request was in the other
435 : * direction, or we have run out of higher-sectored requests.
436 : * Start again from the request with the earliest expiry time.
437 : */
438 0 : rq = deadline_fifo_request(dd, per_prio, data_dir);
439 : } else {
440 : /*
441 : * The last req was the same dir and we have a next request in
442 : * sort order. No expired requests so continue on from here.
443 : */
444 : rq = next_rq;
445 : }
446 :
447 : /*
448 : * For a zoned block device, if we only have writes queued and none of
449 : * them can be dispatched, rq will be NULL.
450 : */
451 0 : if (!rq)
452 : return NULL;
453 :
454 0 : dd->last_dir = data_dir;
455 0 : dd->batching = 0;
456 :
457 : dispatch_request:
458 0 : if (started_after(dd, rq, latest_start))
459 : return NULL;
460 :
461 : /*
462 : * rq is the selected appropriate request.
463 : */
464 0 : dd->batching++;
465 0 : deadline_move_request(dd, per_prio, rq);
466 : done:
467 0 : ioprio_class = dd_rq_ioclass(rq);
468 0 : prio = ioprio_class_to_prio[ioprio_class];
469 0 : dd->per_prio[prio].stats.dispatched++;
470 : /*
471 : * If the request needs its target zone locked, do it.
472 : */
473 0 : blk_req_zone_write_lock(rq);
474 0 : rq->rq_flags |= RQF_STARTED;
475 0 : return rq;
476 : }
477 :
478 : /*
479 : * Check whether there are any requests with priority other than DD_RT_PRIO
480 : * that were inserted more than prio_aging_expire jiffies ago.
481 : */
482 0 : static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
483 : unsigned long now)
484 : {
485 : struct request *rq;
486 : enum dd_prio prio;
487 : int prio_cnt;
488 :
489 : lockdep_assert_held(&dd->lock);
490 :
491 0 : prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
492 0 : !!dd_queued(dd, DD_IDLE_PRIO);
493 0 : if (prio_cnt < 2)
494 : return NULL;
495 :
496 0 : for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
497 0 : rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
498 0 : now - dd->prio_aging_expire);
499 0 : if (rq)
500 : return rq;
501 : }
502 :
503 : return NULL;
504 : }
505 :
506 : /*
507 : * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
508 : *
509 : * One confusing aspect here is that we get called for a specific
510 : * hardware queue, but we may return a request that is for a
511 : * different hardware queue. This is because mq-deadline has shared
512 : * state for all hardware queues, in terms of sorting, FIFOs, etc.
513 : */
514 0 : static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
515 : {
516 0 : struct deadline_data *dd = hctx->queue->elevator->elevator_data;
517 0 : const unsigned long now = jiffies;
518 : struct request *rq;
519 : enum dd_prio prio;
520 :
521 0 : spin_lock(&dd->lock);
522 0 : rq = dd_dispatch_prio_aged_requests(dd, now);
523 0 : if (rq)
524 : goto unlock;
525 :
526 : /*
527 : * Next, dispatch requests in priority order. Ignore lower priority
528 : * requests if any higher priority requests are pending.
529 : */
530 0 : for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
531 0 : rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
532 0 : if (rq || dd_queued(dd, prio))
533 : break;
534 : }
535 :
536 : unlock:
537 0 : spin_unlock(&dd->lock);
538 :
539 0 : return rq;
540 : }
541 :
542 : /*
543 : * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
544 : * function is used by __blk_mq_get_tag().
545 : */
546 0 : static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
547 : {
548 0 : struct deadline_data *dd = data->q->elevator->elevator_data;
549 :
550 : /* Do not throttle synchronous reads. */
551 0 : if (op_is_sync(op) && !op_is_write(op))
552 : return;
553 :
554 : /*
555 : * Throttle asynchronous requests and writes such that these requests
556 : * do not block the allocation of synchronous requests.
557 : */
558 0 : data->shallow_depth = dd->async_depth;
559 : }
560 :
561 : /* Called by blk_mq_update_nr_requests(). */
562 0 : static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
563 : {
564 0 : struct request_queue *q = hctx->queue;
565 0 : struct deadline_data *dd = q->elevator->elevator_data;
566 0 : struct blk_mq_tags *tags = hctx->sched_tags;
567 :
568 0 : dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
569 :
570 0 : sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
571 0 : }
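/*
 * Illustrative sketch, not part of this scheduler: the throttle above leaves
 * roughly a quarter of the scheduler tags for synchronous reads. Standalone
 * userspace version of the arithmetic; the nr_requests values below are
 * example inputs, not defaults taken from this file.
 */
#include <assert.h>

static unsigned long example_async_depth(unsigned long nr_requests)
{
	unsigned long depth = 3 * nr_requests / 4;

	return depth ? depth : 1;	/* max(1UL, ...) */
}

int main(void)
{
	assert(example_async_depth(64) == 48);	/* 16 tags kept free for sync reads */
	assert(example_async_depth(1) == 1);	/* never drops below one tag */
	return 0;
}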
572 :
573 : /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
574 0 : static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
575 : {
576 0 : dd_depth_updated(hctx);
577 0 : return 0;
578 : }
579 :
580 0 : static void dd_exit_sched(struct elevator_queue *e)
581 : {
582 0 : struct deadline_data *dd = e->elevator_data;
583 : enum dd_prio prio;
584 :
585 0 : for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
586 0 : struct dd_per_prio *per_prio = &dd->per_prio[prio];
587 0 : const struct io_stats_per_prio *stats = &per_prio->stats;
588 : uint32_t queued;
589 :
590 0 : WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
591 0 : WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
592 :
593 0 : spin_lock(&dd->lock);
594 0 : queued = dd_queued(dd, prio);
595 0 : spin_unlock(&dd->lock);
596 :
597 0 : WARN_ONCE(queued != 0,
598 : "statistics for priority %d: i %u m %u d %u c %u\n",
599 : prio, stats->inserted, stats->merged,
600 : stats->dispatched, atomic_read(&stats->completed));
601 : }
602 :
603 0 : kfree(dd);
604 0 : }
605 :
606 : /*
607 : * initialize elevator private data (deadline_data).
608 : */
609 0 : static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
610 : {
611 : struct deadline_data *dd;
612 : struct elevator_queue *eq;
613 : enum dd_prio prio;
614 0 : int ret = -ENOMEM;
615 :
616 0 : eq = elevator_alloc(q, e);
617 0 : if (!eq)
618 : return ret;
619 :
620 0 : dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
621 0 : if (!dd)
622 : goto put_eq;
623 :
624 0 : eq->elevator_data = dd;
625 :
626 0 : for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
627 0 : struct dd_per_prio *per_prio = &dd->per_prio[prio];
628 :
629 0 : INIT_LIST_HEAD(&per_prio->dispatch);
630 0 : INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
631 0 : INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
632 0 : per_prio->sort_list[DD_READ] = RB_ROOT;
633 0 : per_prio->sort_list[DD_WRITE] = RB_ROOT;
634 : }
635 0 : dd->fifo_expire[DD_READ] = read_expire;
636 0 : dd->fifo_expire[DD_WRITE] = write_expire;
637 0 : dd->writes_starved = writes_starved;
638 0 : dd->front_merges = 1;
639 0 : dd->last_dir = DD_WRITE;
640 0 : dd->fifo_batch = fifo_batch;
641 0 : dd->prio_aging_expire = prio_aging_expire;
642 0 : spin_lock_init(&dd->lock);
643 0 : spin_lock_init(&dd->zone_lock);
644 :
645 0 : q->elevator = eq;
646 0 : return 0;
647 :
648 : put_eq:
649 0 : kobject_put(&eq->kobj);
650 0 : return ret;
651 : }
652 :
653 : /*
654 : * Try to merge @bio into an existing request. If @bio has been merged into
655 : * an existing request, store the pointer to that request into *@rq.
656 : */
657 0 : static int dd_request_merge(struct request_queue *q, struct request **rq,
658 : struct bio *bio)
659 : {
660 0 : struct deadline_data *dd = q->elevator->elevator_data;
661 0 : const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
662 0 : const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
663 0 : struct dd_per_prio *per_prio = &dd->per_prio[prio];
664 0 : sector_t sector = bio_end_sector(bio);
665 : struct request *__rq;
666 :
667 0 : if (!dd->front_merges)
668 : return ELEVATOR_NO_MERGE;
669 :
670 0 : __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
671 0 : if (__rq) {
672 0 : BUG_ON(sector != blk_rq_pos(__rq));
673 :
674 0 : if (elv_bio_merge_ok(__rq, bio)) {
675 0 : *rq = __rq;
676 0 : if (blk_discard_mergable(__rq))
677 : return ELEVATOR_DISCARD_MERGE;
678 0 : return ELEVATOR_FRONT_MERGE;
679 : }
680 : }
681 :
682 : return ELEVATOR_NO_MERGE;
683 : }
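/*
 * Illustrative sketch, not part of this scheduler: a front merge is possible
 * when the bio ends exactly at the sector where an existing request starts,
 * which is why the rbtree lookup above keys on bio_end_sector(). Standalone
 * userspace check with made-up sector numbers:
 */
#include <assert.h>
#include <stdbool.h>

static bool example_front_mergeable(unsigned long long bio_end_sector,
				    unsigned long long rq_start_sector)
{
	return bio_end_sector == rq_start_sector;
}

int main(void)
{
	assert(example_front_mergeable(108, 108));	/* bio [100,108) + rq [108,116) */
	assert(!example_front_mergeable(108, 116));	/* gap, so no front merge */
	return 0;
}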
684 :
685 : /*
686 : * Attempt to merge a bio into an existing request. This function is called
687 : * before @bio is associated with a request.
688 : */
689 0 : static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
690 : unsigned int nr_segs)
691 : {
692 0 : struct deadline_data *dd = q->elevator->elevator_data;
693 0 : struct request *free = NULL;
694 : bool ret;
695 :
696 0 : spin_lock(&dd->lock);
697 0 : ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
698 0 : spin_unlock(&dd->lock);
699 :
700 0 : if (free)
701 0 : blk_mq_free_request(free);
702 :
703 0 : return ret;
704 : }
705 :
706 : /*
707 : * add rq to rbtree and fifo
708 : */
709 0 : static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
710 : bool at_head)
711 : {
712 0 : struct request_queue *q = hctx->queue;
713 0 : struct deadline_data *dd = q->elevator->elevator_data;
714 0 : const enum dd_data_dir data_dir = rq_data_dir(rq);
715 0 : u16 ioprio = req_get_ioprio(rq);
716 0 : u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
717 : struct dd_per_prio *per_prio;
718 : enum dd_prio prio;
719 0 : LIST_HEAD(free);
720 :
721 : lockdep_assert_held(&dd->lock);
722 :
723 : /*
724 : * This may be a requeue of a write request that has locked its
725 : * target zone. If it is the case, this releases the zone lock.
726 : */
727 0 : blk_req_zone_write_unlock(rq);
728 :
729 0 : prio = ioprio_class_to_prio[ioprio_class];
730 0 : per_prio = &dd->per_prio[prio];
731 0 : if (!rq->elv.priv[0]) {
732 0 : per_prio->stats.inserted++;
733 0 : rq->elv.priv[0] = (void *)(uintptr_t)1;
734 : }
735 :
736 0 : if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
737 0 : blk_mq_free_requests(&free);
738 0 : return;
739 : }
740 :
741 0 : trace_block_rq_insert(rq);
742 :
743 0 : if (at_head) {
744 0 : list_add(&rq->queuelist, &per_prio->dispatch);
745 0 : rq->fifo_time = jiffies;
746 : } else {
747 0 : deadline_add_rq_rb(per_prio, rq);
748 :
749 0 : if (rq_mergeable(rq)) {
750 0 : elv_rqhash_add(q, rq);
751 0 : if (!q->last_merge)
752 0 : q->last_merge = rq;
753 : }
754 :
755 : /*
756 : * set expire time and add to fifo list
757 : */
758 0 : rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
759 0 : list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
760 : }
761 : }
762 :
763 : /*
764 : * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
765 : */
766 0 : static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
767 : struct list_head *list, bool at_head)
768 : {
769 0 : struct request_queue *q = hctx->queue;
770 0 : struct deadline_data *dd = q->elevator->elevator_data;
771 :
772 0 : spin_lock(&dd->lock);
773 0 : while (!list_empty(list)) {
774 : struct request *rq;
775 :
776 0 : rq = list_first_entry(list, struct request, queuelist);
777 0 : list_del_init(&rq->queuelist);
778 0 : dd_insert_request(hctx, rq, at_head);
779 : }
780 0 : spin_unlock(&dd->lock);
781 0 : }
782 :
783 : /* Callback from inside blk_mq_rq_ctx_init(). */
784 0 : static void dd_prepare_request(struct request *rq)
785 : {
786 0 : rq->elv.priv[0] = NULL;
787 0 : }
788 :
789 : /*
790 : * Callback from inside blk_mq_free_request().
791 : *
792 : * For zoned block devices, write unlock the target zone of
793 : * completed write requests. Do this while holding the zone lock
794 : * spinlock so that the zone is never unlocked while deadline_fifo_request()
795 : * or deadline_next_request() are executing. This function is called for
796 : * all requests, whether or not these requests complete successfully.
797 : *
798 : * For a zoned block device, __dd_dispatch_request() may have stopped
799 : * dispatching requests if all the queued requests are write requests directed
800 : * at zones that are already locked due to on-going write requests. To ensure
801 : * write request dispatch progress in this case, mark the queue as needing a
802 : * restart to ensure that the queue is run again after completion of the
803 : * request and zones being unlocked.
804 : */
805 0 : static void dd_finish_request(struct request *rq)
806 : {
807 0 : struct request_queue *q = rq->q;
808 0 : struct deadline_data *dd = q->elevator->elevator_data;
809 0 : const u8 ioprio_class = dd_rq_ioclass(rq);
810 0 : const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
811 0 : struct dd_per_prio *per_prio = &dd->per_prio[prio];
812 :
813 : /*
814 : * The block layer core may call dd_finish_request() without having
815 : * called dd_insert_requests(). Skip requests that bypassed I/O
816 : * scheduling. See also blk_mq_request_bypass_insert().
817 : */
818 0 : if (!rq->elv.priv[0])
819 : return;
820 :
821 0 : atomic_inc(&per_prio->stats.completed);
822 :
823 0 : if (blk_queue_is_zoned(q)) {
824 : unsigned long flags;
825 :
826 : spin_lock_irqsave(&dd->zone_lock, flags);
827 : blk_req_zone_write_unlock(rq);
828 : if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
829 : blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
830 : spin_unlock_irqrestore(&dd->zone_lock, flags);
831 : }
832 : }
833 :
834 0 : static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
835 : {
836 0 : return !list_empty_careful(&per_prio->dispatch) ||
837 0 : !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
838 0 : !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
839 : }
840 :
841 0 : static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
842 : {
843 0 : struct deadline_data *dd = hctx->queue->elevator->elevator_data;
844 : enum dd_prio prio;
845 :
846 0 : for (prio = 0; prio <= DD_PRIO_MAX; prio++)
847 0 : if (dd_has_work_for_prio(&dd->per_prio[prio]))
848 : return true;
849 :
850 : return false;
851 : }
852 :
853 : /*
854 : * sysfs parts below
855 : */
856 : #define SHOW_INT(__FUNC, __VAR) \
857 : static ssize_t __FUNC(struct elevator_queue *e, char *page) \
858 : { \
859 : struct deadline_data *dd = e->elevator_data; \
860 : \
861 : return sysfs_emit(page, "%d\n", __VAR); \
862 : }
863 : #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
864 0 : SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
865 0 : SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
866 0 : SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
867 0 : SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
868 0 : SHOW_INT(deadline_front_merges_show, dd->front_merges);
869 0 : SHOW_INT(deadline_async_depth_show, dd->async_depth);
870 0 : SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
871 : #undef SHOW_INT
872 : #undef SHOW_JIFFIES
873 :
874 : #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
875 : static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
876 : { \
877 : struct deadline_data *dd = e->elevator_data; \
878 : int __data, __ret; \
879 : \
880 : __ret = kstrtoint(page, 0, &__data); \
881 : if (__ret < 0) \
882 : return __ret; \
883 : if (__data < (MIN)) \
884 : __data = (MIN); \
885 : else if (__data > (MAX)) \
886 : __data = (MAX); \
887 : *(__PTR) = __CONV(__data); \
888 : return count; \
889 : }
890 : #define STORE_INT(__FUNC, __PTR, MIN, MAX) \
891 : STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
892 : #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \
893 : STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
894 0 : STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
895 0 : STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
896 0 : STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
897 0 : STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
898 0 : STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
899 0 : STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
900 0 : STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
901 : #undef STORE_FUNCTION
902 : #undef STORE_INT
903 : #undef STORE_JIFFIES
904 :
905 : #define DD_ATTR(name) \
906 : __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
907 :
908 : static struct elv_fs_entry deadline_attrs[] = {
909 : DD_ATTR(read_expire),
910 : DD_ATTR(write_expire),
911 : DD_ATTR(writes_starved),
912 : DD_ATTR(front_merges),
913 : DD_ATTR(async_depth),
914 : DD_ATTR(fifo_batch),
915 : DD_ATTR(prio_aging_expire),
916 : __ATTR_NULL
917 : };
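/*
 * Illustrative sketch, not part of this scheduler: these attributes appear
 * under /sys/block/<dev>/queue/iosched/ once mq-deadline is the active
 * scheduler, and the *_expire values are read and written in milliseconds
 * (see the msecs_to_jiffies conversion above). A hypothetical userspace
 * program that lowers read_expire to 100 ms for sda:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/queue/iosched/read_expire", "w");

	if (!f)
		return 1;	/* scheduler not active or insufficient privileges */
	fprintf(f, "100\n");
	return fclose(f) ? 1 : 0;
}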
918 :
919 : #ifdef CONFIG_BLK_DEBUG_FS
920 : #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \
921 : static void *deadline_##name##_fifo_start(struct seq_file *m, \
922 : loff_t *pos) \
923 : __acquires(&dd->lock) \
924 : { \
925 : struct request_queue *q = m->private; \
926 : struct deadline_data *dd = q->elevator->elevator_data; \
927 : struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
928 : \
929 : spin_lock(&dd->lock); \
930 : return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \
931 : } \
932 : \
933 : static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
934 : loff_t *pos) \
935 : { \
936 : struct request_queue *q = m->private; \
937 : struct deadline_data *dd = q->elevator->elevator_data; \
938 : struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
939 : \
940 : return seq_list_next(v, &per_prio->fifo_list[data_dir], pos); \
941 : } \
942 : \
943 : static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
944 : __releases(&dd->lock) \
945 : { \
946 : struct request_queue *q = m->private; \
947 : struct deadline_data *dd = q->elevator->elevator_data; \
948 : \
949 : spin_unlock(&dd->lock); \
950 : } \
951 : \
952 : static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
953 : .start = deadline_##name##_fifo_start, \
954 : .next = deadline_##name##_fifo_next, \
955 : .stop = deadline_##name##_fifo_stop, \
956 : .show = blk_mq_debugfs_rq_show, \
957 : }; \
958 : \
959 : static int deadline_##name##_next_rq_show(void *data, \
960 : struct seq_file *m) \
961 : { \
962 : struct request_queue *q = data; \
963 : struct deadline_data *dd = q->elevator->elevator_data; \
964 : struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
965 : struct request *rq = per_prio->next_rq[data_dir]; \
966 : \
967 : if (rq) \
968 : __blk_mq_debugfs_rq_show(m, rq); \
969 : return 0; \
970 : }
971 :
972 : DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
973 : DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
974 : DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
975 : DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
976 : DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
977 : DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
978 : #undef DEADLINE_DEBUGFS_DDIR_ATTRS
979 :
980 : static int deadline_batching_show(void *data, struct seq_file *m)
981 : {
982 : struct request_queue *q = data;
983 : struct deadline_data *dd = q->elevator->elevator_data;
984 :
985 : seq_printf(m, "%u\n", dd->batching);
986 : return 0;
987 : }
988 :
989 : static int deadline_starved_show(void *data, struct seq_file *m)
990 : {
991 : struct request_queue *q = data;
992 : struct deadline_data *dd = q->elevator->elevator_data;
993 :
994 : seq_printf(m, "%u\n", dd->starved);
995 : return 0;
996 : }
997 :
998 : static int dd_async_depth_show(void *data, struct seq_file *m)
999 : {
1000 : struct request_queue *q = data;
1001 : struct deadline_data *dd = q->elevator->elevator_data;
1002 :
1003 : seq_printf(m, "%u\n", dd->async_depth);
1004 : return 0;
1005 : }
1006 :
1007 : static int dd_queued_show(void *data, struct seq_file *m)
1008 : {
1009 : struct request_queue *q = data;
1010 : struct deadline_data *dd = q->elevator->elevator_data;
1011 : u32 rt, be, idle;
1012 :
1013 : spin_lock(&dd->lock);
1014 : rt = dd_queued(dd, DD_RT_PRIO);
1015 : be = dd_queued(dd, DD_BE_PRIO);
1016 : idle = dd_queued(dd, DD_IDLE_PRIO);
1017 : spin_unlock(&dd->lock);
1018 :
1019 : seq_printf(m, "%u %u %u\n", rt, be, idle);
1020 :
1021 : return 0;
1022 : }
1023 :
1024 : /* Number of requests owned by the block driver for a given priority. */
1025 : static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
1026 : {
1027 : const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
1028 :
1029 : lockdep_assert_held(&dd->lock);
1030 :
1031 : return stats->dispatched + stats->merged -
1032 : atomic_read(&stats->completed);
1033 : }
1034 :
1035 : static int dd_owned_by_driver_show(void *data, struct seq_file *m)
1036 : {
1037 : struct request_queue *q = data;
1038 : struct deadline_data *dd = q->elevator->elevator_data;
1039 : u32 rt, be, idle;
1040 :
1041 : spin_lock(&dd->lock);
1042 : rt = dd_owned_by_driver(dd, DD_RT_PRIO);
1043 : be = dd_owned_by_driver(dd, DD_BE_PRIO);
1044 : idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
1045 : spin_unlock(&dd->lock);
1046 :
1047 : seq_printf(m, "%u %u %u\n", rt, be, idle);
1048 :
1049 : return 0;
1050 : }
1051 :
1052 : #define DEADLINE_DISPATCH_ATTR(prio) \
1053 : static void *deadline_dispatch##prio##_start(struct seq_file *m, \
1054 : loff_t *pos) \
1055 : __acquires(&dd->lock) \
1056 : { \
1057 : struct request_queue *q = m->private; \
1058 : struct deadline_data *dd = q->elevator->elevator_data; \
1059 : struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
1060 : \
1061 : spin_lock(&dd->lock); \
1062 : return seq_list_start(&per_prio->dispatch, *pos); \
1063 : } \
1064 : \
1065 : static void *deadline_dispatch##prio##_next(struct seq_file *m, \
1066 : void *v, loff_t *pos) \
1067 : { \
1068 : struct request_queue *q = m->private; \
1069 : struct deadline_data *dd = q->elevator->elevator_data; \
1070 : struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
1071 : \
1072 : return seq_list_next(v, &per_prio->dispatch, pos); \
1073 : } \
1074 : \
1075 : static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
1076 : __releases(&dd->lock) \
1077 : { \
1078 : struct request_queue *q = m->private; \
1079 : struct deadline_data *dd = q->elevator->elevator_data; \
1080 : \
1081 : spin_unlock(&dd->lock); \
1082 : } \
1083 : \
1084 : static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
1085 : .start = deadline_dispatch##prio##_start, \
1086 : .next = deadline_dispatch##prio##_next, \
1087 : .stop = deadline_dispatch##prio##_stop, \
1088 : .show = blk_mq_debugfs_rq_show, \
1089 : }
1090 :
1091 : DEADLINE_DISPATCH_ATTR(0);
1092 : DEADLINE_DISPATCH_ATTR(1);
1093 : DEADLINE_DISPATCH_ATTR(2);
1094 : #undef DEADLINE_DISPATCH_ATTR
1095 :
1096 : #define DEADLINE_QUEUE_DDIR_ATTRS(name) \
1097 : {#name "_fifo_list", 0400, \
1098 : .seq_ops = &deadline_##name##_fifo_seq_ops}
1099 : #define DEADLINE_NEXT_RQ_ATTR(name) \
1100 : {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
1101 : static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
1102 : DEADLINE_QUEUE_DDIR_ATTRS(read0),
1103 : DEADLINE_QUEUE_DDIR_ATTRS(write0),
1104 : DEADLINE_QUEUE_DDIR_ATTRS(read1),
1105 : DEADLINE_QUEUE_DDIR_ATTRS(write1),
1106 : DEADLINE_QUEUE_DDIR_ATTRS(read2),
1107 : DEADLINE_QUEUE_DDIR_ATTRS(write2),
1108 : DEADLINE_NEXT_RQ_ATTR(read0),
1109 : DEADLINE_NEXT_RQ_ATTR(write0),
1110 : DEADLINE_NEXT_RQ_ATTR(read1),
1111 : DEADLINE_NEXT_RQ_ATTR(write1),
1112 : DEADLINE_NEXT_RQ_ATTR(read2),
1113 : DEADLINE_NEXT_RQ_ATTR(write2),
1114 : {"batching", 0400, deadline_batching_show},
1115 : {"starved", 0400, deadline_starved_show},
1116 : {"async_depth", 0400, dd_async_depth_show},
1117 : {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1118 : {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1119 : {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1120 : {"owned_by_driver", 0400, dd_owned_by_driver_show},
1121 : {"queued", 0400, dd_queued_show},
1122 : {},
1123 : };
1124 : #undef DEADLINE_QUEUE_DDIR_ATTRS
1125 : #endif
1126 :
1127 : static struct elevator_type mq_deadline = {
1128 : .ops = {
1129 : .depth_updated = dd_depth_updated,
1130 : .limit_depth = dd_limit_depth,
1131 : .insert_requests = dd_insert_requests,
1132 : .dispatch_request = dd_dispatch_request,
1133 : .prepare_request = dd_prepare_request,
1134 : .finish_request = dd_finish_request,
1135 : .next_request = elv_rb_latter_request,
1136 : .former_request = elv_rb_former_request,
1137 : .bio_merge = dd_bio_merge,
1138 : .request_merge = dd_request_merge,
1139 : .requests_merged = dd_merged_requests,
1140 : .request_merged = dd_request_merged,
1141 : .has_work = dd_has_work,
1142 : .init_sched = dd_init_sched,
1143 : .exit_sched = dd_exit_sched,
1144 : .init_hctx = dd_init_hctx,
1145 : },
1146 :
1147 : #ifdef CONFIG_BLK_DEBUG_FS
1148 : .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1149 : #endif
1150 : .elevator_attrs = deadline_attrs,
1151 : .elevator_name = "mq-deadline",
1152 : .elevator_alias = "deadline",
1153 : .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
1154 : .elevator_owner = THIS_MODULE,
1155 : };
1156 : MODULE_ALIAS("mq-deadline-iosched");
1157 :
1158 1 : static int __init deadline_init(void)
1159 : {
1160 1 : return elv_register(&mq_deadline);
1161 : }
1162 :
1163 0 : static void __exit deadline_exit(void)
1164 : {
1165 0 : elv_unregister(&mq_deadline);
1166 0 : }
1167 :
1168 : module_init(deadline_init);
1169 : module_exit(deadline_exit);
1170 :
1171 : MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1172 : MODULE_LICENSE("GPL");
1173 : MODULE_DESCRIPTION("MQ deadline IO scheduler");
|