// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);
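
/*
 * Illustrative sketch, not part of this file: wait queue heads are
 * normally set up through the init_waitqueue_head() wrapper (which
 * supplies the lockdep class key) or declared statically with
 * DECLARE_WAIT_QUEUE_HEAD(). "example_dev" and its "read_wq" field
 * are hypothetical.
 */
#if 0	/* usage sketch only */
static DECLARE_WAIT_QUEUE_HEAD(example_static_wq);

static void example_setup(struct example_dev *dev)
{
	init_waitqueue_head(&dev->read_wq);	/* dynamic initialization */
}
#endif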

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
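
/*
 * Illustrative sketch, not part of this file: callers rarely invoke
 * __wake_up() directly; the wake_up*() macros in <linux/wait.h> wrap it.
 * wake_up() limits the wakeup to one exclusive waiter, wake_up_all()
 * passes nr_exclusive == 0 to wake everyone. "example_wq" and
 * "example_ready" are hypothetical.
 */
#if 0	/* usage sketch only */
static void example_producer(void)
{
	example_ready = true;
	wake_up(&example_wq);		/* non-exclusive waiters + one exclusive */
	wake_up_all(&example_wq);	/* every waiter */
	__wake_up(&example_wq, TASK_NORMAL, 2, NULL);	/* up to two exclusive */
}
#endif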

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
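
/*
 * Illustrative sketch, not part of this file: a sync wakeup suits a waker
 * that is about to block itself, e.g. handing data to a consumer right
 * before sleeping; wrappers such as wake_up_interruptible_sync_poll()
 * funnel into this function. The direct call below is hypothetical.
 */
#if 0	/* usage sketch only */
static void example_handoff(void)
{
	example_ready = true;
	__wake_up_sync_key(&example_wq, TASK_INTERRUPTIBLE, NULL);
	schedule();	/* the waker schedules away, matching the WF_SYNC hint */
}
#endif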

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
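
/*
 * Illustrative sketch, not part of this file: the canonical open-coded
 * wait loop built on prepare_to_wait()/finish_wait(). DEFINE_WAIT() sets
 * up an on-stack entry using autoremove_wake_function(). "example_wq"
 * and "example_ready" are hypothetical.
 */
#if 0	/* usage sketch only */
static int example_wait_for_data(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&example_wq, &wait, TASK_INTERRUPTIBLE);
		if (example_ready)
			break;
		if (signal_pending(current)) {
			finish_wait(&example_wq, &wait);
			return -ERESTARTSYS;
		}
		schedule();
	}
	finish_wait(&example_wq, &wait);
	return 0;
}
#endif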

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
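
/*
 * Illustrative sketch, not part of this file: an exclusive waiter queues
 * at the tail, so a wake_up() (nr_exclusive == 1) wakes only one such
 * waiter and avoids a thundering herd. Hypothetical names as above.
 */
#if 0	/* usage sketch only */
static void example_wait_exclusive(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (example_ready)
			break;
		schedule();
	}
	finish_wait(&example_wq, &wait);
}
#endif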

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition plus a wakeup
		 * after it can't see us; it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
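
/*
 * Illustrative sketch, not part of this file: prepare_to_wait_event() is
 * the helper behind the wait_event*() macro family; most code uses those
 * macros instead of calling it directly. Hypothetical names as above.
 */
#if 0	/* usage sketch only */
static int example_wait_event(void)
{
	/* Sleeps until example_ready is true or a signal arrives. */
	return wait_event_interruptible(example_wq, example_ready);
}
#endif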

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
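
/*
 * Illustrative sketch, not part of this file: these helpers back the
 * wait_event_interruptible_locked*() macros, which are entered with
 * wq_head->lock already held so the condition check cannot race with a
 * waker. Hypothetical names as above.
 */
#if 0	/* usage sketch only */
static int example_wait_locked(void)
{
	int err;

	spin_lock(&example_wq.lock);
	err = wait_event_interruptible_locked(example_wq, example_ready);
	spin_unlock(&example_wq.lock);
	return err;
}
#endif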

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPUs that we haven't seen yet (and that might
	 *   still change the stack area),
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
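
/*
 * Illustrative sketch, not part of this file: the wait_woken() pattern
 * from the diagram above, spelled out. The entry stays queued for the
 * whole loop, so there is no re-queueing per iteration. Hypothetical
 * names as above.
 */
#if 0	/* usage sketch only */
static long example_wait_woken(long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&example_wq, &wait);
	while (!example_ready && timeout) {
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
		if (signal_pending(current))
			break;
	}
	remove_wait_queue(&example_wq, &wait);
	return timeout;
}
#endif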

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
|