// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

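/* RCU callback: free an icq using the cache pointer stashed by ioc_destroy_icq(). */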
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

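/* Exit every icq still linked to @ioc; called when the owning task exits. */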
static void ioc_exit_icqs(struct io_context *ioc)
{
	struct io_cq *icq;

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
		ioc_exit_icq(icq);
	spin_unlock_irq(&ioc->lock);
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to and clearing it from @icq are
	 * done under queue_lock. If it's not pointing to @icq now, it
	 * never will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icqs and then frees the ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/*
 * Releasing icqs requires reverse order double locking and we may already be
 * holding a queue_lock. Do it asynchronously from a workqueue.
 */
static bool ioc_delay_free(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	if (!hlist_empty(&ioc->icq_list)) {
		queue_work(system_power_efficient_wq, &ioc->release_work);
		spin_unlock_irqrestore(&ioc->lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
	return false;
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	rcu_read_lock();
	while (!list_empty(&icq_list)) {
		struct io_cq *icq =
			list_entry(icq_list.next, struct io_cq, q_node);

		spin_lock_irq(&icq->ioc->lock);
		if (!(icq->flags & ICQ_DESTROYED))
			ioc_destroy_icq(icq);
		spin_unlock_irq(&icq->ioc->lock);
	}
	rcu_read_unlock();
}
#else /* CONFIG_BLK_ICQ */
static inline void ioc_exit_icqs(struct io_context *ioc)
{
}
static inline bool ioc_delay_free(struct io_context *ioc)
{
	return false;
}
#endif /* CONFIG_BLK_ICQ */

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->active_ref)) {
		ioc_exit_icqs(ioc);
		put_io_context(ioc);
	}
}

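/* Allocate and initialize a fresh io_context with a single reference held. */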
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
	return ioc;
}

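/*
 * Set the io priority of @task, allocating an io_context on demand.  The
 * caller's uid or euid must match the target task's uid, or the caller
 * must have CAP_SYS_NICE; the change is also subject to an LSM check.
 */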
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	if (unlikely(!task->io_context)) {
		struct io_context *ioc;

		task_unlock(task);

		ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
		if (!ioc)
			return -ENOMEM;

		task_lock(task);
		if (task->flags & PF_EXITING) {
			kmem_cache_free(iocontext_cachep, ioc);
			goto out;
		}
		if (task->io_context)
			kmem_cache_free(iocontext_cachep, ioc);
		else
			task->io_context = ioc;
	}
	task->io_context->ioprio = ioprio;
out:
	task_unlock(task);
	return 0;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

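/*
 * Called on fork: with CLONE_IO the child shares the parent's io_context,
 * otherwise a valid io priority is copied into a freshly allocated one.
 */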
int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		atomic_inc(&ioc->active_ref);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}

#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the current task's io_context and @q.
 * Must be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure an io_cq linking the current task's io_context and @q exists.
 * If it doesn't, a new one is allocated atomically and linked.
 *
 * The caller is responsible for ensuring the io_context won't go away and
 * that @q is alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

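/*
 * Find or create the icq linking the current task and @q, allocating the
 * task's io_context first if it doesn't exist yet.  Returns the icq with a
 * reference to its io_context held, or NULL on allocation failure.
 */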
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = alloc_io_context(GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;

		task_lock(current);
		if (current->io_context) {
			kmem_cache_free(iocontext_cachep, ioc);
			ioc = current->io_context;
		} else {
			current->io_context = ioc;
		}

		get_io_context(ioc);
		task_unlock(current);
	} else {
		get_io_context(ioc);

		spin_lock_irq(&q->queue_lock);
		icq = ioc_lookup_icq(q);
		spin_unlock_irq(&q->queue_lock);
	}

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
#endif /* CONFIG_BLK_ICQ */

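/* Create the io_context slab cache at boot. */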
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);