/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

/* One id per policy: wbt, blk-iolatency, blk-iocost and blk-ioprio */
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
	RQ_QOS_IOPRIO,
};

/* Wait queue plus in-flight counter; the building block for throttling */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

/* One node in a queue's singly linked chain of active QoS policies */
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

/* Hooks a policy may implement; unset hooks are skipped during dispatch */
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
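/*
 * Example (not part of the kernel): a hypothetical minimal policy. All
 * demo_* names are invented for illustration, and kfree() assumes
 * <linux/slab.h>. A policy embeds struct rq_qos in its own state so the
 * hook callbacks can recover the containing object with container_of();
 * only the hooks it cares about need to be filled in. demo_throttle()
 * and demo_done() are defined with the rq_qos_wait() example below.
 */
struct demo_qos {
	struct rq_qos rqos;	/* embedded chain node */
	struct rq_wait rqw;	/* in-flight accounting, see rq_qos_wait() below */
};

static void demo_throttle(struct rq_qos *rqos, struct bio *bio);
static void demo_done(struct rq_qos *rqos, struct request *rq);

static void demo_exit(struct rq_qos *rqos)
{
	kfree(container_of(rqos, struct demo_qos, rqos));
}

static struct rq_qos_ops demo_ops = {
	.throttle	= demo_throttle,
	.done		= demo_done,
	.exit		= demo_exit,
};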
51 :
52 : struct rq_depth {
53 : unsigned int max_depth;
54 :
55 : int scale_step;
56 : bool scaled_max;
57 :
58 : unsigned int queue_depth;
59 : unsigned int default_depth;
60 : };
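/*
 * Example (hypothetical): how a policy might drive rq_depth from latency
 * feedback, in the spirit of blk-wbt. demo_adjust_depth() is invented
 * here; the scale helpers it calls are declared further down and they
 * recompute rqd->max_depth internally.
 */
static void demo_adjust_depth(struct rq_depth *rqd, bool missed_target)
{
	if (missed_target)
		/* true = hard throttle: latency badly exceeded the target */
		rq_depth_scale_down(rqd, true);
	else
		rq_depth_scale_up(rqd);
	/* rqd->max_depth now holds the new limit to enforce */
}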
61 :
62 : static inline struct rq_qos *rq_qos_id(struct request_queue *q,
63 : enum rq_qos_id id)
64 : {
65 : struct rq_qos *rqos;
66 0 : for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
67 0 : if (rqos->id == id)
68 : break;
69 : }
70 : return rqos;
71 : }
72 :
73 : static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
74 : {
75 0 : return rq_qos_id(q, RQ_QOS_WBT);
76 : }
77 :
78 : static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
79 : {
80 : return rq_qos_id(q, RQ_QOS_LATENCY);
81 : }
82 :
83 : static inline void rq_wait_init(struct rq_wait *rq_wait)
84 : {
85 : atomic_set(&rq_wait->inflight, 0);
86 : init_waitqueue_head(&rq_wait->wait);
87 : }
88 :
89 : static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
90 : {
91 : /*
92 : * No IO can be in-flight when adding rqos, so freeze queue, which
93 : * is fine since we only support rq_qos for blk-mq queue.
94 : *
95 : * Reuse ->queue_lock for protecting against other concurrent
96 : * rq_qos adding/deleting
97 : */
98 : blk_mq_freeze_queue(q);
99 :
100 : spin_lock_irq(&q->queue_lock);
101 : rqos->next = q->rq_qos;
102 : q->rq_qos = rqos;
103 : spin_unlock_irq(&q->queue_lock);
104 :
105 : blk_mq_unfreeze_queue(q);
106 :
107 : if (rqos->ops->debugfs_attrs)
108 : blk_mq_debugfs_register_rqos(rqos);
109 : }
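/*
 * Example (continuing the hypothetical demo policy): attaching to a
 * queue follows the same pattern wbt_init() uses. Fill in ops, q and id,
 * then call rq_qos_add(); kzalloc() assumes <linux/slab.h>.
 */
static int demo_qos_init(struct request_queue *q)
{
	struct demo_qos *dq = kzalloc(sizeof(*dq), GFP_KERNEL);

	if (!dq)
		return -ENOMEM;
	rq_wait_init(&dq->rqw);
	dq->rqos.ops = &demo_ops;
	dq->rqos.q = q;
	dq->rqos.id = RQ_QOS_WBT;	/* a real policy has its own rq_qos_id */
	rq_qos_add(q, &dq->rqos);
	return 0;
}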
110 :
111 : static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
112 : {
113 : struct rq_qos **cur;
114 :
115 : /*
116 : * See comment in rq_qos_add() about freezing queue & using
117 : * ->queue_lock.
118 : */
119 : blk_mq_freeze_queue(q);
120 :
121 : spin_lock_irq(&q->queue_lock);
122 : for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
123 : if (*cur == rqos) {
124 : *cur = rqos->next;
125 : break;
126 : }
127 : }
128 : spin_unlock_irq(&q->queue_lock);
129 :
130 : blk_mq_unfreeze_queue(q);
131 :
132 : blk_mq_debugfs_unregister_rqos(rqos);
133 : }
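/*
 * Example (hypothetical): detaching a policy by hand, similar to the
 * init error path of blk-iolatency: look it up by id, unlink it, then
 * free the containing object. When a queue dies, rq_qos_exit() (bottom
 * of this file) instead walks the chain invoking each ->exit hook.
 */
static void demo_qos_teardown(struct request_queue *q)
{
	struct rq_qos *rqos = rq_qos_id(q, RQ_QOS_WBT);

	if (rqos) {
		rq_qos_del(q, rqos);
		kfree(container_of(rqos, struct demo_qos, rqos));
	}
}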
134 :
135 : typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
136 : typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);
137 :
138 : void rq_qos_wait(struct rq_wait *rqw, void *private_data,
139 : acquire_inflight_cb_t *acquire_inflight_cb,
140 : cleanup_cb_t *cleanup_cb);
141 : bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
142 : bool rq_depth_scale_up(struct rq_depth *rqd);
143 : bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
144 : bool rq_depth_calc_max_depth(struct rq_depth *rqd);
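/*
 * Example (hypothetical demo policy, loosely following blk-wbt): the
 * acquire callback must try to take an in-flight slot without blocking
 * and return true on success; rq_qos_wait() handles the sleeping and
 * retrying. DEMO_DEPTH and all demo_* names are invented here.
 */
#define DEMO_DEPTH	32	/* arbitrary fixed queue depth for the demo */

static bool demo_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
	/* atomically increment inflight only if it stays below the limit */
	return rq_wait_inc_below(rqw, DEMO_DEPTH);
}

static void demo_release_inflight(struct rq_wait *rqw)
{
	if (atomic_dec_return(&rqw->inflight) < DEMO_DEPTH &&
	    wq_has_sleeper(&rqw->wait))
		wake_up_all(&rqw->wait);
}

/* called if both the waker and the waiter won the race for a slot */
static void demo_cleanup(struct rq_wait *rqw, void *private_data)
{
	demo_release_inflight(rqw);
}

static void demo_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct demo_qos *dq = container_of(rqos, struct demo_qos, rqos);

	rq_qos_wait(&dq->rqw, NULL, demo_acquire_inflight, demo_cleanup);
}

static void demo_done(struct rq_qos *rqos, struct request *rq)
{
	struct demo_qos *dq = container_of(rqos, struct demo_qos, rqos);

	/*
	 * Caveat: a real policy must only release slots it actually took
	 * (wbt tracks this with a per-request flag); the demo assumes
	 * every completed request was throttled.
	 */
	demo_release_inflight(&dq->rqw);
}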
145 :
146 : void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
147 : void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
148 : void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
149 : void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
150 : void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
151 : void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
152 : void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
153 : void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
154 : void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
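/*
 * The __rq_qos_*() helpers live in blk-rq-qos.c and all follow the same
 * shape: walk the chain from the given node and invoke each policy's
 * hook if it is set, roughly like this (paraphrased):
 *
 *	void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
 *	{
 *		do {
 *			if (rqos->ops->throttle)
 *				rqos->ops->throttle(rqos, bio);
 *			rqos = rqos->next;
 *		} while (rqos);
 *	}
 */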
155 :
156 : static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
157 : {
158 0 : if (q->rq_qos)
159 0 : __rq_qos_cleanup(q->rq_qos, bio);
160 : }
161 :
162 : static inline void rq_qos_done(struct request_queue *q, struct request *rq)
163 : {
164 0 : if (q->rq_qos)
165 0 : __rq_qos_done(q->rq_qos, rq);
166 : }
167 :
168 : static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
169 : {
170 0 : if (q->rq_qos)
171 0 : __rq_qos_issue(q->rq_qos, rq);
172 : }
173 :
174 : static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
175 : {
176 0 : if (q->rq_qos)
177 0 : __rq_qos_requeue(q->rq_qos, rq);
178 : }
179 :
180 0 : static inline void rq_qos_done_bio(struct bio *bio)
181 : {
182 0 : if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
183 0 : bio_flagged(bio, BIO_QOS_MERGED))) {
184 0 : struct request_queue *q = bdev_get_queue(bio->bi_bdev);
185 0 : if (q->rq_qos)
186 0 : __rq_qos_done_bio(q->rq_qos, bio);
187 : }
188 0 : }
189 :
190 : static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
191 : {
192 0 : if (q->rq_qos) {
193 0 : bio_set_flag(bio, BIO_QOS_THROTTLED);
194 0 : __rq_qos_throttle(q->rq_qos, bio);
195 : }
196 : }
197 :
198 : static inline void rq_qos_track(struct request_queue *q, struct request *rq,
199 : struct bio *bio)
200 : {
201 0 : if (q->rq_qos)
202 0 : __rq_qos_track(q->rq_qos, rq, bio);
203 : }
204 :
205 : static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
206 : struct bio *bio)
207 : {
208 0 : if (q->rq_qos) {
209 0 : bio_set_flag(bio, BIO_QOS_MERGED);
210 0 : __rq_qos_merge(q->rq_qos, rq, bio);
211 : }
212 : }
213 :
214 : static inline void rq_qos_queue_depth_changed(struct request_queue *q)
215 : {
216 0 : if (q->rq_qos)
217 0 : __rq_qos_queue_depth_changed(q->rq_qos);
218 : }
219 :
220 : void rq_qos_exit(struct request_queue *);
221 :
222 : #endif