Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /* Kernel thread helper functions.
3 : * Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 : * Copyright (C) 2009 Red Hat, Inc.
5 : *
6 : * Creation is done via kthreadd, so that we get a clean environment
7 : * even if we're invoked from userspace (think modprobe, hotplug cpu,
8 : * etc.).
9 : */
10 : #include <uapi/linux/sched/types.h>
11 : #include <linux/mm.h>
12 : #include <linux/mmu_context.h>
13 : #include <linux/sched.h>
14 : #include <linux/sched/mm.h>
15 : #include <linux/sched/task.h>
16 : #include <linux/kthread.h>
17 : #include <linux/completion.h>
18 : #include <linux/err.h>
19 : #include <linux/cgroup.h>
20 : #include <linux/cpuset.h>
21 : #include <linux/unistd.h>
22 : #include <linux/file.h>
23 : #include <linux/export.h>
24 : #include <linux/mutex.h>
25 : #include <linux/slab.h>
26 : #include <linux/freezer.h>
27 : #include <linux/ptrace.h>
28 : #include <linux/uaccess.h>
29 : #include <linux/numa.h>
30 : #include <linux/sched/isolation.h>
31 : #include <trace/events/sched.h>
32 :
33 :
34 : static DEFINE_SPINLOCK(kthread_create_lock);
35 : static LIST_HEAD(kthread_create_list);
36 : struct task_struct *kthreadd_task;
37 :
38 : struct kthread_create_info
39 : {
40 : /* Information passed to kthread() from kthreadd. */
41 : int (*threadfn)(void *data);
42 : void *data;
43 : int node;
44 :
45 : /* Result passed back to kthread_create() from kthreadd. */
46 : struct task_struct *result;
47 : struct completion *done;
48 :
49 : struct list_head list;
50 : };
51 :
52 : struct kthread {
53 : unsigned long flags;
54 : unsigned int cpu;
55 : int result;
56 : int (*threadfn)(void *);
57 : void *data;
58 : struct completion parked;
59 : struct completion exited;
60 : #ifdef CONFIG_BLK_CGROUP
61 : struct cgroup_subsys_state *blkcg_css;
62 : #endif
63 : /* To store the full name if task comm is truncated. */
64 : char *full_name;
65 : };
66 :
67 : enum KTHREAD_BITS {
68 : KTHREAD_IS_PER_CPU = 0,
69 : KTHREAD_SHOULD_STOP,
70 : KTHREAD_SHOULD_PARK,
71 : };
72 :
73 : static inline struct kthread *to_kthread(struct task_struct *k)
74 : {
75 900 : WARN_ON(!(k->flags & PF_KTHREAD));
76 900 : return k->worker_private;
77 : }
78 :
79 : /*
80 : * Variant of to_kthread() that doesn't assume @p is a kthread.
81 : *
82 : * Per construction; when:
83 : *
84 : * (p->flags & PF_KTHREAD) && p->worker_private
85 : *
86 : * the task is both a kthread and struct kthread is persistent. However
87 : * PF_KTHREAD on its own is not, kernel_thread() can exec() (see umh.c and
88 : * begin_new_exec()).
89 : */
90 : static inline struct kthread *__to_kthread(struct task_struct *p)
91 : {
92 0 : void *kthread = p->worker_private;
93 0 : if (kthread && !(p->flags & PF_KTHREAD))
94 0 : kthread = NULL;
95 : return kthread;
96 : }
97 :
98 0 : void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
99 : {
100 0 : struct kthread *kthread = to_kthread(tsk);
101 :
102 0 : if (!kthread || !kthread->full_name) {
103 0 : __get_task_comm(buf, buf_size, tsk);
104 0 : return;
105 : }
106 :
107 0 : strscpy_pad(buf, kthread->full_name, buf_size);
108 : }
109 :
110 108 : bool set_kthread_struct(struct task_struct *p)
111 : {
112 : struct kthread *kthread;
113 :
114 216 : if (WARN_ON_ONCE(to_kthread(p)))
115 : return false;
116 :
117 108 : kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
118 108 : if (!kthread)
119 : return false;
120 :
121 216 : init_completion(&kthread->exited);
122 216 : init_completion(&kthread->parked);
123 108 : p->vfork_done = &kthread->exited;
124 :
125 108 : p->worker_private = kthread;
126 108 : return true;
127 : }
128 :
129 92 : void free_kthread_struct(struct task_struct *k)
130 : {
131 : struct kthread *kthread;
132 :
133 : /*
134 : * Can be NULL if kzalloc() in set_kthread_struct() failed.
135 : */
136 184 : kthread = to_kthread(k);
137 92 : if (!kthread)
138 : return;
139 :
140 : #ifdef CONFIG_BLK_CGROUP
141 : WARN_ON_ONCE(kthread->blkcg_css);
142 : #endif
143 92 : k->worker_private = NULL;
144 92 : kfree(kthread->full_name);
145 92 : kfree(kthread);
146 : }
147 :
148 : /**
149 : * kthread_should_stop - should this kthread return now?
150 : *
151 : * When someone calls kthread_stop() on your kthread, it will be woken
152 : * and this will return true. You should then return, and your return
153 : * value will be passed through to kthread_stop().
154 : */
155 194 : bool kthread_should_stop(void)
156 : {
157 582 : return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
158 : }
159 : EXPORT_SYMBOL(kthread_should_stop);
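/*
 * Illustrative sketch (not part of this file): a minimal thread function
 * honoring the kthread_should_stop() contract described above. The name
 * example_threadfn and the 100 ms polling interval are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/delay.h>

static int example_threadfn(void *data)
{
	/* Loop until kthread_stop() is called on this task. */
	while (!kthread_should_stop()) {
		/* ... do one unit of work on @data ... */
		msleep_interruptible(100);
	}
	/* This value is handed back to kthread_stop(). */
	return 0;
}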
160 :
161 187 : bool __kthread_should_park(struct task_struct *k)
162 : {
163 561 : return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
164 : }
165 : EXPORT_SYMBOL_GPL(__kthread_should_park);
166 :
167 : /**
168 : * kthread_should_park - should this kthread park now?
169 : *
170 : * When someone calls kthread_park() on your kthread, it will be woken
171 : * and this will return true. You should then do the necessary
172 : * cleanup and call kthread_parkme().
173 : *
174 : * Similar to kthread_should_stop(), but this keeps the thread alive
175 : * and in a park position. kthread_unpark() "restarts" the thread and
176 : * calls the thread function again.
177 : */
178 187 : bool kthread_should_park(void)
179 : {
180 187 : return __kthread_should_park(current);
181 : }
182 : EXPORT_SYMBOL_GPL(kthread_should_park);
183 :
184 : /**
185 : * kthread_freezable_should_stop - should this freezable kthread return now?
186 : * @was_frozen: optional out parameter, indicates whether %current was frozen
187 : *
188 : * kthread_should_stop() for freezable kthreads, which will enter
189 : * refrigerator if necessary. This function is safe from kthread_stop() /
190 : * freezer deadlock and freezable kthreads should use this function instead
191 : * of calling try_to_freeze() directly.
192 : */
193 0 : bool kthread_freezable_should_stop(bool *was_frozen)
194 : {
195 0 : bool frozen = false;
196 :
197 : might_sleep();
198 :
199 0 : if (unlikely(freezing(current)))
200 0 : frozen = __refrigerator(true);
201 :
202 0 : if (was_frozen)
203 0 : *was_frozen = frozen;
204 :
205 0 : return kthread_should_stop();
206 : }
207 : EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
208 :
209 : /**
210 : * kthread_func - return the function specified on kthread creation
211 : * @task: kthread task in question
212 : *
213 : * Returns NULL if the task is not a kthread.
214 : */
215 0 : void *kthread_func(struct task_struct *task)
216 : {
217 0 : struct kthread *kthread = __to_kthread(task);
218 0 : if (kthread)
219 0 : return kthread->threadfn;
220 : return NULL;
221 : }
222 : EXPORT_SYMBOL_GPL(kthread_func);
223 :
224 : /**
225 : * kthread_data - return data value specified on kthread creation
226 : * @task: kthread task in question
227 : *
228 : * Return the data value specified when kthread @task was created.
229 : * The caller is responsible for ensuring the validity of @task when
230 : * calling this function.
231 : */
232 20 : void *kthread_data(struct task_struct *task)
233 : {
234 40 : return to_kthread(task)->data;
235 : }
236 : EXPORT_SYMBOL_GPL(kthread_data);
237 :
238 : /**
239 : * kthread_probe_data - speculative version of kthread_data()
240 : * @task: possible kthread task in question
241 : *
242 : * @task could be a kthread task. Return the data value specified when it
243 : * was created if accessible. If @task isn't a kthread task or its data is
244 : * inaccessible for any reason, %NULL is returned. This function requires
245 : * that @task itself is safe to dereference.
246 : */
247 0 : void *kthread_probe_data(struct task_struct *task)
248 : {
249 0 : struct kthread *kthread = __to_kthread(task);
250 0 : void *data = NULL;
251 :
252 0 : if (kthread)
253 0 : copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
254 0 : return data;
255 : }
256 :
257 105 : static void __kthread_parkme(struct kthread *self)
258 : {
259 : for (;;) {
260 : /*
261 : * TASK_PARKED is a special state; we must serialize against
262 : * possible pending wakeups to avoid store-store collisions on
263 : * task->state.
264 : *
265 : * Such a collision might result in the task state
266 : * changing from TASK_PARKED and us failing the
267 : * wait_task_inactive() in kthread_park().
268 : */
269 531 : set_special_state(TASK_PARKED);
270 212 : if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
271 : break;
272 :
273 : /*
274 : * Thread is going to call schedule(), do not preempt it,
275 : * or the caller of kthread_park() may spend more time in
276 : * wait_task_inactive().
277 : */
278 1 : preempt_disable();
279 1 : complete(&self->parked);
280 1 : schedule_preempt_disabled();
281 1 : preempt_enable();
282 : }
283 105 : __set_current_state(TASK_RUNNING);
284 105 : }
285 :
286 0 : void kthread_parkme(void)
287 : {
288 0 : __kthread_parkme(to_kthread(current));
289 0 : }
290 : EXPORT_SYMBOL_GPL(kthread_parkme);
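/*
 * Illustrative sketch (not part of this file): a thread function that
 * honors both the park and the stop requests, in the style of per-CPU
 * helper threads. example_percpu_threadfn is a hypothetical name.
 */
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int example_percpu_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Release per-CPU resources here, then park. */
			kthread_parkme();
			continue;
		}
		/* ... per-CPU work ... */
		schedule_timeout_interruptible(msecs_to_jiffies(100));
	}
	return 0;
}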
291 :
292 : /**
293 : * kthread_exit - Cause the current kthread to return @result to kthread_stop().
294 : * @result: The integer value to return to kthread_stop().
295 : *
296 : * While kthread_exit() can be called directly, it exists so that
297 : * functions that do some additional work in non-modular code, such as
298 : * module_put_and_kthread_exit(), can be implemented.
299 : *
300 : * Does not return.
301 : */
302 93 : void __noreturn kthread_exit(long result)
303 : {
304 186 : struct kthread *kthread = to_kthread(current);
305 93 : kthread->result = result;
306 93 : do_exit(0);
307 : }
308 :
309 : /**
310 : * kthread_complete_and_exit - Exit the current kthread.
311 : * @comp: Completion to complete
312 : * @code: The integer value to return to kthread_stop().
313 : *
314 : * If present, complete @comp, and return @code to kthread_stop().
315 : *
316 : * A kernel thread whose module may be removed after the completion of
317 : * @comp can use this function to exit safely.
318 : *
319 : * Does not return.
320 : */
321 93 : void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
322 : {
323 93 : if (comp)
324 93 : complete(comp);
325 :
326 93 : kthread_exit(code);
327 : }
328 : EXPORT_SYMBOL(kthread_complete_and_exit);
329 :
330 105 : static int kthread(void *_create)
331 : {
332 : static const struct sched_param param = { .sched_priority = 0 };
333 : /* Copy data: it's on kthread's stack */
334 105 : struct kthread_create_info *create = _create;
335 105 : int (*threadfn)(void *data) = create->threadfn;
336 105 : void *data = create->data;
337 : struct completion *done;
338 : struct kthread *self;
339 : int ret;
340 :
341 210 : self = to_kthread(current);
342 :
343 : /* If user was SIGKILLed, I release the structure. */
344 210 : done = xchg(&create->done, NULL);
345 105 : if (!done) {
346 0 : kfree(create);
347 0 : kthread_exit(-EINTR);
348 : }
349 :
350 105 : self->threadfn = threadfn;
351 105 : self->data = data;
352 :
353 : /*
354 : * The new thread inherited kthreadd's priority and CPU mask. Reset
355 : * back to default in case they have been changed.
356 : */
357 105 : sched_setscheduler_nocheck(current, SCHED_NORMAL, ¶m);
358 315 : set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_KTHREAD));
359 :
360 : /* OK, tell user we're spawned, wait for stop or wakeup */
361 105 : __set_current_state(TASK_UNINTERRUPTIBLE);
362 105 : create->result = current;
363 : /*
364 : * Thread is going to call schedule(), do not preempt it,
365 : * or the creator may spend more time in wait_task_inactive().
366 : */
367 105 : preempt_disable();
368 105 : complete(done);
369 105 : schedule_preempt_disabled();
370 105 : preempt_enable();
371 :
372 105 : ret = -EINTR;
373 210 : if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
374 : cgroup_kthread_ready();
375 105 : __kthread_parkme(self);
376 105 : ret = threadfn(data);
377 : }
378 0 : kthread_exit(ret);
379 : }
380 :
381 : /* called from kernel_clone() to get node information for the task about to be created */
382 107 : int tsk_fork_get_node(struct task_struct *tsk)
383 : {
384 : #ifdef CONFIG_NUMA
385 : if (tsk == kthreadd_task)
386 : return tsk->pref_node_fork;
387 : #endif
388 107 : return NUMA_NO_NODE;
389 : }
390 :
391 105 : static void create_kthread(struct kthread_create_info *create)
392 : {
393 : int pid;
394 :
395 : #ifdef CONFIG_NUMA
396 : current->pref_node_fork = create->node;
397 : #endif
398 : /* We want our own signal handler (we take no signals by default). */
399 105 : pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
400 105 : if (pid < 0) {
401 : /* If user was SIGKILLed, I release the structure. */
402 0 : struct completion *done = xchg(&create->done, NULL);
403 :
404 0 : if (!done) {
405 0 : kfree(create);
406 0 : return;
407 : }
408 0 : create->result = ERR_PTR(pid);
409 0 : complete(done);
410 : }
411 : }
412 :
413 : static __printf(4, 0)
414 105 : struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
415 : void *data, int node,
416 : const char namefmt[],
417 : va_list args)
418 : {
419 105 : DECLARE_COMPLETION_ONSTACK(done);
420 : struct task_struct *task;
421 105 : struct kthread_create_info *create = kmalloc(sizeof(*create),
422 : GFP_KERNEL);
423 :
424 105 : if (!create)
425 : return ERR_PTR(-ENOMEM);
426 105 : create->threadfn = threadfn;
427 105 : create->data = data;
428 105 : create->node = node;
429 105 : create->done = &done;
430 :
431 105 : spin_lock(&kthread_create_lock);
432 210 : list_add_tail(&create->list, &kthread_create_list);
433 105 : spin_unlock(&kthread_create_lock);
434 :
435 105 : wake_up_process(kthreadd_task);
436 : /*
437 : * Wait for completion in killable state, for I might be chosen by
438 : * the OOM killer while kthreadd is trying to allocate memory for
439 : * new kernel thread.
440 : */
441 105 : if (unlikely(wait_for_completion_killable(&done))) {
442 : /*
443 : * If I was SIGKILLed before kthreadd (or new kernel thread)
444 : * calls complete(), leave the cleanup of this structure to
445 : * that thread.
446 : */
447 0 : if (xchg(&create->done, NULL))
448 : return ERR_PTR(-EINTR);
449 : /*
450 : * kthreadd (or new kernel thread) will call complete()
451 : * shortly.
452 : */
453 0 : wait_for_completion(&done);
454 : }
455 105 : task = create->result;
456 105 : if (!IS_ERR(task)) {
457 : char name[TASK_COMM_LEN];
458 : va_list aq;
459 : int len;
460 :
461 : /*
462 : * task is already visible to other tasks, so updating
463 : * COMM must be protected.
464 : */
465 105 : va_copy(aq, args);
466 105 : len = vsnprintf(name, sizeof(name), namefmt, aq);
467 105 : va_end(aq);
468 105 : if (len >= TASK_COMM_LEN) {
469 186 : struct kthread *kthread = to_kthread(task);
470 :
471 : /* leave it truncated when out of memory. */
472 93 : kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
473 : }
474 105 : set_task_comm(task, name);
475 : }
476 105 : kfree(create);
477 105 : return task;
478 : }
479 :
480 : /**
481 : * kthread_create_on_node - create a kthread.
482 : * @threadfn: the function to run until signal_pending(current).
483 : * @data: data ptr for @threadfn.
484 : * @node: task and thread structures for the thread are allocated on this node
485 : * @namefmt: printf-style name for the thread.
486 : *
487 : * Description: This helper function creates and names a kernel
488 : * thread. The thread will be stopped: use wake_up_process() to start
489 : * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
490 : * is affine to all CPUs.
491 : *
492 : * If the thread is going to be bound to a particular cpu, give its node
493 : * in @node to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
494 : * When woken, the thread will run @threadfn() with @data as its
495 : * argument. @threadfn() can either return directly if it is a
496 : * standalone thread for which no one will call kthread_stop(), or
497 : * return when 'kthread_should_stop()' is true (which means
498 : * kthread_stop() has been called). The return value should be zero
499 : * or a negative error number; it will be passed to kthread_stop().
500 : *
501 : * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
502 : */
503 105 : struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
504 : void *data, int node,
505 : const char namefmt[],
506 : ...)
507 : {
508 : struct task_struct *task;
509 : va_list args;
510 :
511 105 : va_start(args, namefmt);
512 105 : task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
513 105 : va_end(args);
514 :
515 105 : return task;
516 : }
517 : EXPORT_SYMBOL(kthread_create_on_node);
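/*
 * Illustrative sketch (not part of this file): creating and waking a
 * kthread. kthread_run() is the create-and-wake shorthand declared in
 * <linux/kthread.h>; kthread_create() expands to
 * kthread_create_on_node(..., NUMA_NO_NODE, ...). example_threadfn and
 * example_data are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/err.h>

static int example_threadfn(void *data);	/* e.g. the sketch further up */

static struct task_struct *example_start(void *example_data)
{
	struct task_struct *tsk;

	tsk = kthread_run(example_threadfn, example_data, "example/%d", 0);
	if (IS_ERR(tsk))
		return tsk;	/* ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR) */

	/* ... tsk is already running; kthread_stop(tsk) ends it later ... */
	return tsk;
}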
518 :
519 10 : static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
520 : {
521 : unsigned long flags;
522 :
523 10 : if (!wait_task_inactive(p, state)) {
524 : WARN_ON(1);
525 : return;
526 : }
527 :
528 : /* It's safe because the task is inactive. */
529 10 : raw_spin_lock_irqsave(&p->pi_lock, flags);
530 10 : do_set_cpus_allowed(p, mask);
531 10 : p->flags |= PF_NO_SETAFFINITY;
532 20 : raw_spin_unlock_irqrestore(&p->pi_lock, flags);
533 : }
534 :
535 : static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
536 : {
537 2 : __kthread_bind_mask(p, cpumask_of(cpu), state);
538 : }
539 :
540 8 : void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
541 : {
542 8 : __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
543 8 : }
544 :
545 : /**
546 : * kthread_bind - bind a just-created kthread to a cpu.
547 : * @p: thread created by kthread_create().
548 : * @cpu: cpu (might not be online, must be possible) for @p to run on.
549 : *
550 : * Description: This function is equivalent to set_cpus_allowed(),
551 : * except that @cpu doesn't need to be online, and the thread must be
552 : * stopped (i.e., just returned from kthread_create()).
553 : */
554 0 : void kthread_bind(struct task_struct *p, unsigned int cpu)
555 : {
556 2 : __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
557 0 : }
558 : EXPORT_SYMBOL(kthread_bind);
559 :
560 : /**
561 : * kthread_create_on_cpu - Create a cpu bound kthread
562 : * @threadfn: the function to run until signal_pending(current).
563 : * @data: data ptr for @threadfn.
564 : * @cpu: The cpu on which the thread should be bound.
565 : * @namefmt: printf-style name for the thread. Format is restricted
566 : * to "name.*%u". Code fills in cpu number.
567 : *
568 : * Description: This helper function creates and names a kernel thread bound to @cpu.
569 : */
570 1 : struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
571 : void *data, unsigned int cpu,
572 : const char *namefmt)
573 : {
574 : struct task_struct *p;
575 :
576 1 : p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
577 : cpu);
578 1 : if (IS_ERR(p))
579 : return p;
580 1 : kthread_bind(p, cpu);
581 : /* CPU hotplug need to bind once again when unparking the thread. */
582 2 : to_kthread(p)->cpu = cpu;
583 1 : return p;
584 : }
585 : EXPORT_SYMBOL(kthread_create_on_cpu);
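/*
 * Illustrative sketch (not part of this file): a CPU-bound thread created
 * with kthread_create_on_cpu(). As noted above, the name format must
 * contain a "%u" which is filled with the CPU number. example_threadfn
 * is hypothetical.
 */
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static int example_threadfn(void *data);

static int example_start_on_cpu(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(example_threadfn, NULL, cpu, "example/%u");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	wake_up_process(tsk);	/* starts the thread, already bound to @cpu */
	return 0;
}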
586 :
587 5 : void kthread_set_per_cpu(struct task_struct *k, int cpu)
588 : {
589 10 : struct kthread *kthread = to_kthread(k);
590 5 : if (!kthread)
591 : return;
592 :
593 5 : WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
594 :
595 5 : if (cpu < 0) {
596 0 : clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
597 : return;
598 : }
599 :
600 5 : kthread->cpu = cpu;
601 5 : set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
602 : }
603 :
604 0 : bool kthread_is_per_cpu(struct task_struct *p)
605 : {
606 0 : struct kthread *kthread = __to_kthread(p);
607 0 : if (!kthread)
608 : return false;
609 :
610 0 : return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
611 : }
612 :
613 : /**
614 : * kthread_unpark - unpark a thread created by kthread_create().
615 : * @k: thread created by kthread_create().
616 : *
617 : * Sets kthread_should_park() for @k to return false and wakes it.
618 : * If the thread is marked percpu then it is
619 : * bound to the cpu again.
620 : */
621 1 : void kthread_unpark(struct task_struct *k)
622 : {
623 2 : struct kthread *kthread = to_kthread(k);
624 :
625 : /*
626 : * Newly created kthread was parked when the CPU was offline.
627 : * The binding was lost and we need to set it again.
628 : */
629 2 : if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
630 1 : __kthread_bind(k, kthread->cpu, TASK_PARKED);
631 :
632 2 : clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
633 : /*
634 : * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
635 : */
636 1 : wake_up_state(k, TASK_PARKED);
637 1 : }
638 : EXPORT_SYMBOL_GPL(kthread_unpark);
639 :
640 : /**
641 : * kthread_park - park a thread created by kthread_create().
642 : * @k: thread created by kthread_create().
643 : *
644 : * Sets kthread_should_park() for @k to return true, wakes it, and
645 : * waits for it to return. This can also be called after kthread_create()
646 : * instead of calling wake_up_process(): the thread will park without
647 : * calling threadfn().
648 : *
649 : * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
650 : * If called by the kthread itself just the park bit is set.
651 : */
652 1 : int kthread_park(struct task_struct *k)
653 : {
654 2 : struct kthread *kthread = to_kthread(k);
655 :
656 1 : if (WARN_ON(k->flags & PF_EXITING))
657 : return -ENOSYS;
658 :
659 2 : if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
660 : return -EBUSY;
661 :
662 2 : set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
663 1 : if (k != current) {
664 1 : wake_up_process(k);
665 : /*
666 : * Wait for __kthread_parkme() to complete(), this means we
667 : * _will_ have TASK_PARKED and are about to call schedule().
668 : */
669 1 : wait_for_completion(&kthread->parked);
670 : /*
671 : * Now wait for that schedule() to complete and the task to
672 : * get scheduled out.
673 : */
674 1 : WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
675 : }
676 :
677 : return 0;
678 : }
679 : EXPORT_SYMBOL_GPL(kthread_park);
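/*
 * Illustrative sketch (not part of this file): quiescing a thread around
 * a critical window, assuming its thread function checks
 * kthread_should_park() and calls kthread_parkme() as in the sketch near
 * kthread_parkme() above. example_quiesce is a hypothetical helper.
 */
#include <linux/kthread.h>

static int example_quiesce(struct task_struct *tsk)
{
	int ret;

	ret = kthread_park(tsk);	/* returns once tsk is parked and off-CPU */
	if (ret)
		return ret;

	/* ... tsk is guaranteed not to be running here ... */

	kthread_unpark(tsk);		/* let it resume its loop */
	return 0;
}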
680 :
681 : /**
682 : * kthread_stop - stop a thread created by kthread_create().
683 : * @k: thread created by kthread_create().
684 : *
685 : * Sets kthread_should_stop() for @k to return true, wakes it, and
686 : * waits for it to exit. This can also be called after kthread_create()
687 : * instead of calling wake_up_process(): the thread will exit without
688 : * calling threadfn().
689 : *
690 : * If threadfn() may call kthread_exit() itself, the caller must ensure
691 : * task_struct can't go away.
692 : *
693 : * Returns the result of threadfn(), or %-EINTR if wake_up_process()
694 : * was never called.
695 : */
696 0 : int kthread_stop(struct task_struct *k)
697 : {
698 : struct kthread *kthread;
699 : int ret;
700 :
701 0 : trace_sched_kthread_stop(k);
702 :
703 0 : get_task_struct(k);
704 0 : kthread = to_kthread(k);
705 0 : set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
706 0 : kthread_unpark(k);
707 0 : wake_up_process(k);
708 0 : wait_for_completion(&kthread->exited);
709 0 : ret = kthread->result;
710 0 : put_task_struct(k);
711 :
712 0 : trace_sched_kthread_stop_ret(ret);
713 0 : return ret;
714 : }
715 : EXPORT_SYMBOL(kthread_stop);
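/*
 * Illustrative sketch (not part of this file): stopping a thread whose
 * function may call kthread_exit() on its own. As documented above, the
 * caller must ensure the task_struct cannot go away; here it is assumed a
 * reference was taken with get_task_struct() right after creation.
 * struct example_ctx is hypothetical.
 */
#include <linux/kthread.h>
#include <linux/sched/task.h>

struct example_ctx {
	struct task_struct *task;	/* reference held since creation */
};

static int example_teardown(struct example_ctx *ctx)
{
	int ret;

	ret = kthread_stop(ctx->task);	/* threadfn() result, or -EINTR */
	put_task_struct(ctx->task);
	ctx->task = NULL;
	return ret;
}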
716 :
717 1 : int kthreadd(void *unused)
718 : {
719 1 : struct task_struct *tsk = current;
720 :
721 : /* Setup a clean context for our children to inherit. */
722 1 : set_task_comm(tsk, "kthreadd");
723 1 : ignore_signals(tsk);
724 2 : set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
725 1 : set_mems_allowed(node_states[N_MEMORY]);
726 :
727 1 : current->flags |= PF_NOFREEZE;
728 : cgroup_init_kthreadd();
729 :
730 : for (;;) {
731 106 : set_current_state(TASK_INTERRUPTIBLE);
732 106 : if (list_empty(&kthread_create_list))
733 106 : schedule();
734 105 : __set_current_state(TASK_RUNNING);
735 :
736 : spin_lock(&kthread_create_lock);
737 210 : while (!list_empty(&kthread_create_list)) {
738 : struct kthread_create_info *create;
739 :
740 105 : create = list_entry(kthread_create_list.next,
741 : struct kthread_create_info, list);
742 210 : list_del_init(&create->list);
743 105 : spin_unlock(&kthread_create_lock);
744 :
745 105 : create_kthread(create);
746 :
747 : spin_lock(&kthread_create_lock);
748 : }
749 : spin_unlock(&kthread_create_lock);
750 : }
751 :
752 : return 0;
753 : }
754 :
755 0 : void __kthread_init_worker(struct kthread_worker *worker,
756 : const char *name,
757 : struct lock_class_key *key)
758 : {
759 0 : memset(worker, 0, sizeof(struct kthread_worker));
760 : raw_spin_lock_init(&worker->lock);
761 : lockdep_set_class_and_name(&worker->lock, key, name);
762 0 : INIT_LIST_HEAD(&worker->work_list);
763 0 : INIT_LIST_HEAD(&worker->delayed_work_list);
764 0 : }
765 : EXPORT_SYMBOL_GPL(__kthread_init_worker);
766 :
767 : /**
768 : * kthread_worker_fn - kthread function to process kthread_worker
769 : * @worker_ptr: pointer to initialized kthread_worker
770 : *
771 : * This function implements the main cycle of kthread worker. It processes
772 : * work_list until it is stopped with kthread_stop(). It sleeps when the queue
773 : * is empty.
774 : *
775 : * The works are not allowed to hold any locks or leave preemption or interrupts
776 : * disabled when they finish. A safe point for freezing is provided after one work
777 : * finishes and before a new one is started.
778 : *
779 : * Also the works must not be handled by more than one worker at the same time,
780 : * see also kthread_queue_work().
781 : */
782 0 : int kthread_worker_fn(void *worker_ptr)
783 : {
784 0 : struct kthread_worker *worker = worker_ptr;
785 : struct kthread_work *work;
786 :
787 : /*
788 : * FIXME: Update the check and remove the assignment when all kthread
789 : * worker users are created using kthread_create_worker*() functions.
790 : */
791 0 : WARN_ON(worker->task && worker->task != current);
792 0 : worker->task = current;
793 :
794 0 : if (worker->flags & KTW_FREEZABLE)
795 0 : set_freezable();
796 :
797 : repeat:
798 0 : set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
799 :
800 0 : if (kthread_should_stop()) {
801 0 : __set_current_state(TASK_RUNNING);
802 0 : raw_spin_lock_irq(&worker->lock);
803 0 : worker->task = NULL;
804 0 : raw_spin_unlock_irq(&worker->lock);
805 0 : return 0;
806 : }
807 :
808 0 : work = NULL;
809 0 : raw_spin_lock_irq(&worker->lock);
810 0 : if (!list_empty(&worker->work_list)) {
811 0 : work = list_first_entry(&worker->work_list,
812 : struct kthread_work, node);
813 0 : list_del_init(&work->node);
814 : }
815 0 : worker->current_work = work;
816 0 : raw_spin_unlock_irq(&worker->lock);
817 :
818 0 : if (work) {
819 0 : kthread_work_func_t func = work->func;
820 0 : __set_current_state(TASK_RUNNING);
821 0 : trace_sched_kthread_work_execute_start(work);
822 0 : work->func(work);
823 : /*
824 : * Avoid dereferencing work after this point. The trace
825 : * event only cares about the address.
826 : */
827 0 : trace_sched_kthread_work_execute_end(work, func);
828 0 : } else if (!freezing(current))
829 0 : schedule();
830 :
831 0 : try_to_freeze();
832 0 : cond_resched();
833 0 : goto repeat;
834 : }
835 : EXPORT_SYMBOL_GPL(kthread_worker_fn);
836 :
837 : static __printf(3, 0) struct kthread_worker *
838 0 : __kthread_create_worker(int cpu, unsigned int flags,
839 : const char namefmt[], va_list args)
840 : {
841 : struct kthread_worker *worker;
842 : struct task_struct *task;
843 0 : int node = NUMA_NO_NODE;
844 :
845 0 : worker = kzalloc(sizeof(*worker), GFP_KERNEL);
846 0 : if (!worker)
847 : return ERR_PTR(-ENOMEM);
848 :
849 0 : kthread_init_worker(worker);
850 :
851 0 : if (cpu >= 0)
852 0 : node = cpu_to_node(cpu);
853 :
854 0 : task = __kthread_create_on_node(kthread_worker_fn, worker,
855 : node, namefmt, args);
856 0 : if (IS_ERR(task))
857 : goto fail_task;
858 :
859 0 : if (cpu >= 0)
860 0 : kthread_bind(task, cpu);
861 :
862 0 : worker->flags = flags;
863 0 : worker->task = task;
864 0 : wake_up_process(task);
865 0 : return worker;
866 :
867 : fail_task:
868 0 : kfree(worker);
869 0 : return ERR_CAST(task);
870 : }
871 :
872 : /**
873 : * kthread_create_worker - create a kthread worker
874 : * @flags: flags modifying the default behavior of the worker
875 : * @namefmt: printf-style name for the kthread worker (task).
876 : *
877 : * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
878 : * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
879 : * when the worker was SIGKILLed.
880 : */
881 : struct kthread_worker *
882 0 : kthread_create_worker(unsigned int flags, const char namefmt[], ...)
883 : {
884 : struct kthread_worker *worker;
885 : va_list args;
886 :
887 0 : va_start(args, namefmt);
888 0 : worker = __kthread_create_worker(-1, flags, namefmt, args);
889 0 : va_end(args);
890 :
891 0 : return worker;
892 : }
893 : EXPORT_SYMBOL(kthread_create_worker);
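/*
 * Illustrative sketch (not part of this file): a dedicated worker running
 * one work item and being torn down again. example_work_fn and the
 * "example" worker name are hypothetical; KTW_FREEZABLE could be passed
 * instead of 0 for a freezable worker.
 */
#include <linux/kthread.h>
#include <linux/err.h>

static void example_work_fn(struct kthread_work *work)
{
	/* runs in the worker thread's context */
}

static int example_worker_demo(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "example");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, example_work_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);	/* wait until it has run */

	kthread_destroy_worker(worker);
	return 0;
}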
894 :
895 : /**
896 : * kthread_create_worker_on_cpu - create a kthread worker and bind it
897 : * to a given CPU and the associated NUMA node.
898 : * @cpu: CPU number
899 : * @flags: flags modifying the default behavior of the worker
900 : * @namefmt: printf-style name for the kthread worker (task).
901 : *
902 : * Use a valid CPU number if you want to bind the kthread worker
903 : * to the given CPU and the associated NUMA node.
904 : *
905 : * A good practice is to add the cpu number also into the worker name.
906 : * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
907 : *
908 : * CPU hotplug:
909 : * The kthread worker API is simple and generic. It just provides a way
910 : * to create, use, and destroy workers.
911 : *
912 : * It is up to the API user how to handle CPU hotplug. They have to decide
913 : * how to handle pending work items, prevent queuing new ones, and
914 : * restore the functionality when the CPU goes off and on. There are a
915 : * few catches:
916 : *
917 : * - CPU affinity gets lost when it is scheduled on an offline CPU.
918 : *
919 : * - The worker might not exist if the CPU was offline when the user
920 : * created the workers.
921 : *
922 : * Good practice is to implement two CPU hotplug callbacks and to
923 : * destroy/create the worker when the CPU goes down/up.
924 : *
925 : * Return:
926 : * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
927 : * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
928 : * when the worker was SIGKILLed.
929 : */
930 : struct kthread_worker *
931 0 : kthread_create_worker_on_cpu(int cpu, unsigned int flags,
932 : const char namefmt[], ...)
933 : {
934 : struct kthread_worker *worker;
935 : va_list args;
936 :
937 0 : va_start(args, namefmt);
938 0 : worker = __kthread_create_worker(cpu, flags, namefmt, args);
939 0 : va_end(args);
940 :
941 0 : return worker;
942 : }
943 : EXPORT_SYMBOL(kthread_create_worker_on_cpu);
944 :
945 : /*
946 : * Returns true when the work could not be queued at the moment.
947 : * It happens when it is already pending in a worker list
948 : * or when it is being cancelled.
949 : */
950 : static inline bool queuing_blocked(struct kthread_worker *worker,
951 : struct kthread_work *work)
952 : {
953 : lockdep_assert_held(&worker->lock);
954 :
955 0 : return !list_empty(&work->node) || work->canceling;
956 : }
957 :
958 0 : static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
959 : struct kthread_work *work)
960 : {
961 : lockdep_assert_held(&worker->lock);
962 0 : WARN_ON_ONCE(!list_empty(&work->node));
963 : /* Do not use a work with >1 worker, see kthread_queue_work() */
964 0 : WARN_ON_ONCE(work->worker && work->worker != worker);
965 0 : }
966 :
967 : /* insert @work before @pos in @worker */
968 0 : static void kthread_insert_work(struct kthread_worker *worker,
969 : struct kthread_work *work,
970 : struct list_head *pos)
971 : {
972 0 : kthread_insert_work_sanity_check(worker, work);
973 :
974 0 : trace_sched_kthread_work_queue_work(worker, work);
975 :
976 0 : list_add_tail(&work->node, pos);
977 0 : work->worker = worker;
978 0 : if (!worker->current_work && likely(worker->task))
979 0 : wake_up_process(worker->task);
980 0 : }
981 :
982 : /**
983 : * kthread_queue_work - queue a kthread_work
984 : * @worker: target kthread_worker
985 : * @work: kthread_work to queue
986 : *
987 : * Queue @work to the work processor @worker for async execution. @worker
988 : * must have been created with kthread_create_worker(). Returns %true
989 : * if @work was successfully queued, %false if it was already pending.
990 : *
991 : * Reinitialize the work if it needs to be used by another worker.
992 : * For example, when the worker was stopped and started again.
993 : */
994 0 : bool kthread_queue_work(struct kthread_worker *worker,
995 : struct kthread_work *work)
996 : {
997 0 : bool ret = false;
998 : unsigned long flags;
999 :
1000 0 : raw_spin_lock_irqsave(&worker->lock, flags);
1001 0 : if (!queuing_blocked(worker, work)) {
1002 0 : kthread_insert_work(worker, work, &worker->work_list);
1003 0 : ret = true;
1004 : }
1005 0 : raw_spin_unlock_irqrestore(&worker->lock, flags);
1006 0 : return ret;
1007 : }
1008 : EXPORT_SYMBOL_GPL(kthread_queue_work);
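/*
 * Illustrative sketch (not part of this file): the common pattern of
 * embedding a kthread_work in a driver structure, initialized once with
 * kthread_init_work() at probe time, and recovering the structure with
 * container_of() in the work function. struct example_dev and its fields
 * are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/kthread.h>

struct example_dev {
	struct kthread_worker *worker;
	struct kthread_work irq_work;	/* kthread_init_work()ed at probe */
	/* ... device state ... */
};

static void example_irq_work_fn(struct kthread_work *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       irq_work);
	/* ... process dev outside of hard-irq context ... */
}

/* e.g. called from the hard-irq handler to defer the heavy lifting */
static void example_kick(struct example_dev *dev)
{
	kthread_queue_work(dev->worker, &dev->irq_work);
}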
1009 :
1010 : /**
1011 : * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1012 : * delayed work when the timer expires.
1013 : * @t: pointer to the expired timer
1014 : *
1015 : * The format of the function is defined by struct timer_list.
1016 : * It should be called from an irqsafe timer with irqs already off.
1017 : */
1018 0 : void kthread_delayed_work_timer_fn(struct timer_list *t)
1019 : {
1020 0 : struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
1021 0 : struct kthread_work *work = &dwork->work;
1022 0 : struct kthread_worker *worker = work->worker;
1023 : unsigned long flags;
1024 :
1025 : /*
1026 : * This might happen when a pending work is reinitialized.
1027 : * It means that it is being used in a wrong way.
1028 : */
1029 0 : if (WARN_ON_ONCE(!worker))
1030 : return;
1031 :
1032 0 : raw_spin_lock_irqsave(&worker->lock, flags);
1033 : /* Work must not be used with >1 worker, see kthread_queue_work(). */
1034 0 : WARN_ON_ONCE(work->worker != worker);
1035 :
1036 : /* Move the work from worker->delayed_work_list. */
1037 0 : WARN_ON_ONCE(list_empty(&work->node));
1038 0 : list_del_init(&work->node);
1039 0 : if (!work->canceling)
1040 0 : kthread_insert_work(worker, work, &worker->work_list);
1041 :
1042 0 : raw_spin_unlock_irqrestore(&worker->lock, flags);
1043 : }
1044 : EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1045 :
1046 0 : static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1047 : struct kthread_delayed_work *dwork,
1048 : unsigned long delay)
1049 : {
1050 0 : struct timer_list *timer = &dwork->timer;
1051 0 : struct kthread_work *work = &dwork->work;
1052 :
1053 0 : WARN_ON_FUNCTION_MISMATCH(timer->function,
1054 : kthread_delayed_work_timer_fn);
1055 :
1056 : /*
1057 : * If @delay is 0, queue @dwork->work immediately. This is for
1058 : * both optimization and correctness. The earliest @timer can
1059 : * expire is on the closest next tick and delayed_work users depend
1060 : * on there being no such delay when @delay is 0.
1061 : */
1062 0 : if (!delay) {
1063 0 : kthread_insert_work(worker, work, &worker->work_list);
1064 0 : return;
1065 : }
1066 :
1067 : /* Be paranoid and try to detect possible races already now. */
1068 0 : kthread_insert_work_sanity_check(worker, work);
1069 :
1070 0 : list_add(&work->node, &worker->delayed_work_list);
1071 0 : work->worker = worker;
1072 0 : timer->expires = jiffies + delay;
1073 0 : add_timer(timer);
1074 : }
1075 :
1076 : /**
1077 : * kthread_queue_delayed_work - queue the associated kthread work
1078 : * after a delay.
1079 : * @worker: target kthread_worker
1080 : * @dwork: kthread_delayed_work to queue
1081 : * @delay: number of jiffies to wait before queuing
1082 : *
1083 : * If the work is not already pending, it starts a timer that will queue
1084 : * the work after the given @delay. If @delay is zero, it queues the
1085 : * work immediately.
1086 : *
1087 : * Return: %false if the @work was already pending, meaning that
1088 : * either the timer was running or the work was queued. Returns %true
1089 : * otherwise.
1090 : */
1091 0 : bool kthread_queue_delayed_work(struct kthread_worker *worker,
1092 : struct kthread_delayed_work *dwork,
1093 : unsigned long delay)
1094 : {
1095 0 : struct kthread_work *work = &dwork->work;
1096 : unsigned long flags;
1097 0 : bool ret = false;
1098 :
1099 0 : raw_spin_lock_irqsave(&worker->lock, flags);
1100 :
1101 0 : if (!queuing_blocked(worker, work)) {
1102 0 : __kthread_queue_delayed_work(worker, dwork, delay);
1103 0 : ret = true;
1104 : }
1105 :
1106 0 : raw_spin_unlock_irqrestore(&worker->lock, flags);
1107 0 : return ret;
1108 : }
1109 : EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
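/*
 * Illustrative sketch (not part of this file): arming a delayed work item
 * on a worker. example_timeout_fn and the 500 ms delay are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/jiffies.h>

static void example_timeout_fn(struct kthread_work *work)
{
	/* runs in the worker roughly 500 ms after queuing */
}

static struct kthread_delayed_work example_dwork;

static void example_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_timeout_fn);
	kthread_queue_delayed_work(worker, &example_dwork,
				   msecs_to_jiffies(500));
}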
1110 :
1111 : struct kthread_flush_work {
1112 : struct kthread_work work;
1113 : struct completion done;
1114 : };
1115 :
1116 0 : static void kthread_flush_work_fn(struct kthread_work *work)
1117 : {
1118 0 : struct kthread_flush_work *fwork =
1119 0 : container_of(work, struct kthread_flush_work, work);
1120 0 : complete(&fwork->done);
1121 0 : }
1122 :
1123 : /**
1124 : * kthread_flush_work - flush a kthread_work
1125 : * @work: work to flush
1126 : *
1127 : * If @work is queued or executing, wait for it to finish execution.
1128 : */
1129 0 : void kthread_flush_work(struct kthread_work *work)
1130 : {
1131 0 : struct kthread_flush_work fwork = {
1132 : KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1133 0 : COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1134 : };
1135 : struct kthread_worker *worker;
1136 0 : bool noop = false;
1137 :
1138 0 : worker = work->worker;
1139 0 : if (!worker)
1140 0 : return;
1141 :
1142 0 : raw_spin_lock_irq(&worker->lock);
1143 : /* Work must not be used with >1 worker, see kthread_queue_work(). */
1144 0 : WARN_ON_ONCE(work->worker != worker);
1145 :
1146 0 : if (!list_empty(&work->node))
1147 0 : kthread_insert_work(worker, &fwork.work, work->node.next);
1148 0 : else if (worker->current_work == work)
1149 0 : kthread_insert_work(worker, &fwork.work,
1150 : worker->work_list.next);
1151 : else
1152 : noop = true;
1153 :
1154 0 : raw_spin_unlock_irq(&worker->lock);
1155 :
1156 0 : if (!noop)
1157 0 : wait_for_completion(&fwork.done);
1158 : }
1159 : EXPORT_SYMBOL_GPL(kthread_flush_work);
1160 :
1161 : /*
1162 : * Make sure that the timer is neither set nor running and could
1163 : * not manipulate the work list_head any longer.
1164 : *
1165 : * The function is called under worker->lock. The lock is temporarily
1166 : * released but the timer can't be set again in the meantime.
1167 : */
1168 0 : static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1169 : unsigned long *flags)
1170 : {
1171 0 : struct kthread_delayed_work *dwork =
1172 0 : container_of(work, struct kthread_delayed_work, work);
1173 0 : struct kthread_worker *worker = work->worker;
1174 :
1175 : /*
1176 : * del_timer_sync() must be called to make sure that the timer
1177 : * callback is not running. The lock must be temporarily released
1178 : * to avoid a deadlock with the callback. In the meantime,
1179 : * any queuing is blocked by setting the canceling counter.
1180 : */
1181 0 : work->canceling++;
1182 0 : raw_spin_unlock_irqrestore(&worker->lock, *flags);
1183 0 : del_timer_sync(&dwork->timer);
1184 0 : raw_spin_lock_irqsave(&worker->lock, *flags);
1185 0 : work->canceling--;
1186 0 : }
1187 :
1188 : /*
1189 : * This function removes the work from the worker queue.
1190 : *
1191 : * It is called under worker->lock. The caller must make sure that
1192 : * the timer used by delayed work is not running, e.g. by calling
1193 : * kthread_cancel_delayed_work_timer().
1194 : *
1195 : * The work might still be in use when this function finishes. See the
1196 : * current_work processed by the worker.
1197 : *
1198 : * Return: %true if @work was pending and successfully canceled,
1199 : * %false if @work was not pending
1200 : */
1201 : static bool __kthread_cancel_work(struct kthread_work *work)
1202 : {
1203 : /*
1204 : * Try to remove the work from a worker list. It might either
1205 : * be from worker->work_list or from worker->delayed_work_list.
1206 : */
1207 0 : if (!list_empty(&work->node)) {
1208 0 : list_del_init(&work->node);
1209 : return true;
1210 : }
1211 :
1212 : return false;
1213 : }
1214 :
1215 : /**
1216 : * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1217 : * @worker: kthread worker to use
1218 : * @dwork: kthread delayed work to queue
1219 : * @delay: number of jiffies to wait before queuing
1220 : *
1221 : * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1222 : * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1223 : * @work is guaranteed to be queued immediately.
1224 : *
1225 : * Return: %false if @dwork was idle and queued, %true otherwise.
1226 : *
1227 : * A special case is when the work is being canceled in parallel.
1228 : * It might be caused either by the real kthread_cancel_delayed_work_sync()
1229 : * or yet another kthread_mod_delayed_work() call. We let the other command
1230 : * win and return %true here. The return value can be used for reference
1231 : * counting and the number of queued works stays the same. Anyway, the caller
1232 : * is supposed to synchronize these operations a reasonable way.
1233 : *
1234 : * This function is safe to call from any context including IRQ handler.
1235 : * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1236 : * for details.
1237 : */
1238 0 : bool kthread_mod_delayed_work(struct kthread_worker *worker,
1239 : struct kthread_delayed_work *dwork,
1240 : unsigned long delay)
1241 : {
1242 0 : struct kthread_work *work = &dwork->work;
1243 : unsigned long flags;
1244 : int ret;
1245 :
1246 0 : raw_spin_lock_irqsave(&worker->lock, flags);
1247 :
1248 : /* Do not bother with canceling when never queued. */
1249 0 : if (!work->worker) {
1250 : ret = false;
1251 : goto fast_queue;
1252 : }
1253 :
1254 : /* Work must not be used with >1 worker, see kthread_queue_work() */
1255 0 : WARN_ON_ONCE(work->worker != worker);
1256 :
1257 : /*
1258 : * Temporarily cancel the work but do not fight with another command
1259 : * that is canceling the work as well.
1260 : *
1261 : * It is a bit tricky because of possible races with another
1262 : * mod_delayed_work() and cancel_delayed_work() callers.
1263 : *
1264 : * The timer must be canceled first because worker->lock is released
1265 : * when doing so. But the work can be removed from the queue (list)
1266 : * only when it can be queued again so that the return value can
1267 : * be used for reference counting.
1268 : */
1269 0 : kthread_cancel_delayed_work_timer(work, &flags);
1270 0 : if (work->canceling) {
1271 : /* The number of works in the queue does not change. */
1272 : ret = true;
1273 : goto out;
1274 : }
1275 0 : ret = __kthread_cancel_work(work);
1276 :
1277 : fast_queue:
1278 0 : __kthread_queue_delayed_work(worker, dwork, delay);
1279 : out:
1280 0 : raw_spin_unlock_irqrestore(&worker->lock, flags);
1281 0 : return ret;
1282 : }
1283 : EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
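/*
 * Illustrative sketch (not part of this file): using
 * kthread_mod_delayed_work() as a re-armable "debounce" timer that pushes
 * the deadline back on every event. The one second delay is hypothetical.
 */
#include <linux/kthread.h>
#include <linux/jiffies.h>

static void example_event(struct kthread_worker *worker,
			  struct kthread_delayed_work *dwork)
{
	/* queue now if idle, otherwise move the expiry one second out */
	kthread_mod_delayed_work(worker, dwork, msecs_to_jiffies(1000));
}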
1284 :
1285 0 : static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1286 : {
1287 0 : struct kthread_worker *worker = work->worker;
1288 : unsigned long flags;
1289 0 : int ret = false;
1290 :
1291 0 : if (!worker)
1292 : goto out;
1293 :
1294 0 : raw_spin_lock_irqsave(&worker->lock, flags);
1295 : /* Work must not be used with >1 worker, see kthread_queue_work(). */
1296 0 : WARN_ON_ONCE(work->worker != worker);
1297 :
1298 0 : if (is_dwork)
1299 0 : kthread_cancel_delayed_work_timer(work, &flags);
1300 :
1301 0 : ret = __kthread_cancel_work(work);
1302 :
1303 0 : if (worker->current_work != work)
1304 : goto out_fast;
1305 :
1306 : /*
1307 : * The work is in progress and we need to wait with the lock released.
1308 : * In the meantime, block any queuing by setting the canceling counter.
1309 : */
1310 0 : work->canceling++;
1311 0 : raw_spin_unlock_irqrestore(&worker->lock, flags);
1312 0 : kthread_flush_work(work);
1313 0 : raw_spin_lock_irqsave(&worker->lock, flags);
1314 0 : work->canceling--;
1315 :
1316 : out_fast:
1317 0 : raw_spin_unlock_irqrestore(&worker->lock, flags);
1318 : out:
1319 0 : return ret;
1320 : }
1321 :
1322 : /**
1323 : * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1324 : * @work: the kthread work to cancel
1325 : *
1326 : * Cancel @work and wait for its execution to finish. This function
1327 : * can be used even if the work re-queues itself. On return from this
1328 : * function, @work is guaranteed to be not pending or executing on any CPU.
1329 : *
1330 : * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1331 : * delayed works. Use kthread_cancel_delayed_work_sync() instead.
1332 : *
1333 : * The caller must ensure that the worker on which @work was last
1334 : * queued can't be destroyed before this function returns.
1335 : *
1336 : * Return: %true if @work was pending, %false otherwise.
1337 : */
1338 0 : bool kthread_cancel_work_sync(struct kthread_work *work)
1339 : {
1340 0 : return __kthread_cancel_work_sync(work, false);
1341 : }
1342 : EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1343 :
1344 : /**
1345 : * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1346 : * wait for it to finish.
1347 : * @dwork: the kthread delayed work to cancel
1348 : *
1349 : * This is kthread_cancel_work_sync() for delayed works.
1350 : *
1351 : * Return: %true if @dwork was pending, %false otherwise.
1352 : */
1353 0 : bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1354 : {
1355 0 : return __kthread_cancel_work_sync(&dwork->work, true);
1356 : }
1357 : EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
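/*
 * Illustrative sketch (not part of this file): typical teardown ordering,
 * canceling outstanding work items before destroying the worker and the
 * data they touch. struct example_ctx2 and its fields are hypothetical.
 */
#include <linux/kthread.h>

struct example_ctx2 {
	struct kthread_worker *worker;
	struct kthread_work irq_work;
	struct kthread_delayed_work poll_dwork;
};

static void example_shutdown(struct example_ctx2 *ctx)
{
	/* stop the timer and wait for any running instance to finish */
	kthread_cancel_delayed_work_sync(&ctx->poll_dwork);
	kthread_cancel_work_sync(&ctx->irq_work);
	/* none of our work is pending or running anymore */
	kthread_destroy_worker(ctx->worker);
}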
1358 :
1359 : /**
1360 : * kthread_flush_worker - flush all current works on a kthread_worker
1361 : * @worker: worker to flush
1362 : *
1363 : * Wait until all currently executing or pending works on @worker are
1364 : * finished.
1365 : */
1366 0 : void kthread_flush_worker(struct kthread_worker *worker)
1367 : {
1368 0 : struct kthread_flush_work fwork = {
1369 : KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1370 0 : COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1371 : };
1372 :
1373 0 : kthread_queue_work(worker, &fwork.work);
1374 0 : wait_for_completion(&fwork.done);
1375 0 : }
1376 : EXPORT_SYMBOL_GPL(kthread_flush_worker);
1377 :
1378 : /**
1379 : * kthread_destroy_worker - destroy a kthread worker
1380 : * @worker: worker to be destroyed
1381 : *
1382 : * Flush and destroy @worker. The simple flush is enough because the kthread
1383 : * worker API is used only in trivial scenarios. There are no multi-step state
1384 : * machines needed.
1385 : */
1386 0 : void kthread_destroy_worker(struct kthread_worker *worker)
1387 : {
1388 : struct task_struct *task;
1389 :
1390 0 : task = worker->task;
1391 0 : if (WARN_ON(!task))
1392 : return;
1393 :
1394 0 : kthread_flush_worker(worker);
1395 0 : kthread_stop(task);
1396 0 : WARN_ON(!list_empty(&worker->work_list));
1397 0 : kfree(worker);
1398 : }
1399 : EXPORT_SYMBOL(kthread_destroy_worker);
1400 :
1401 : /**
1402 : * kthread_use_mm - make the calling kthread operate on an address space
1403 : * @mm: address space to operate on
1404 : */
1405 0 : void kthread_use_mm(struct mm_struct *mm)
1406 : {
1407 : struct mm_struct *active_mm;
1408 0 : struct task_struct *tsk = current;
1409 :
1410 0 : WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1411 0 : WARN_ON_ONCE(tsk->mm);
1412 :
1413 0 : task_lock(tsk);
1414 : /* Hold off tlb flush IPIs while switching mm's */
1415 : local_irq_disable();
1416 0 : active_mm = tsk->active_mm;
1417 0 : if (active_mm != mm) {
1418 0 : mmgrab(mm);
1419 0 : tsk->active_mm = mm;
1420 : }
1421 0 : tsk->mm = mm;
1422 0 : membarrier_update_current_mm(mm);
1423 0 : switch_mm_irqs_off(active_mm, mm, tsk);
1424 : local_irq_enable();
1425 0 : task_unlock(tsk);
1426 : #ifdef finish_arch_post_lock_switch
1427 : finish_arch_post_lock_switch();
1428 : #endif
1429 :
1430 : /*
1431 : * When a kthread starts operating on an address space, the loop
1432 : * in membarrier_{private,global}_expedited() may not observe
1433 : * that tsk->mm has changed, and not issue an IPI. Membarrier requires a
1434 : * memory barrier after storing to tsk->mm, before accessing
1435 : * user-space memory. A full memory barrier for membarrier
1436 : * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1437 : * mmdrop(), or explicitly with smp_mb().
1438 : */
1439 0 : if (active_mm != mm)
1440 : mmdrop(active_mm);
1441 : else
1442 0 : smp_mb();
1443 0 : }
1444 : EXPORT_SYMBOL_GPL(kthread_use_mm);
1445 :
1446 : /**
1447 : * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1448 : * @mm: address space to operate on
1449 : */
1450 0 : void kthread_unuse_mm(struct mm_struct *mm)
1451 : {
1452 0 : struct task_struct *tsk = current;
1453 :
1454 0 : WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1455 0 : WARN_ON_ONCE(!tsk->mm);
1456 :
1457 0 : task_lock(tsk);
1458 : /*
1459 : * When a kthread stops operating on an address space, the loop
1460 : * in membarrier_{private,global}_expedited() may not observe
1461 : * that tsk->mm has changed, and not issue an IPI. Membarrier requires a
1462 : * memory barrier after accessing user-space memory, before
1463 : * clearing tsk->mm.
1464 : */
1465 : smp_mb__after_spinlock();
1466 0 : sync_mm_rss(mm);
1467 : local_irq_disable();
1468 0 : tsk->mm = NULL;
1469 0 : membarrier_update_current_mm(NULL);
1470 : /* active_mm is still 'mm' */
1471 0 : enter_lazy_tlb(mm, tsk);
1472 : local_irq_enable();
1473 0 : task_unlock(tsk);
1474 0 : }
1475 : EXPORT_SYMBOL_GPL(kthread_unuse_mm);
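/*
 * Illustrative sketch (not part of this file): a kthread temporarily
 * adopting a user address space to copy data into a user buffer, in the
 * style of io_uring/vhost workers. The mm is assumed to be pinned by the
 * caller (e.g. via mmget()); all names are hypothetical.
 */
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>

static int example_copy_to_user_mm(struct mm_struct *mm, void __user *dst,
				   const void *src, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);
	if (copy_to_user(dst, src, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);

	return ret;
}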
1476 :
1477 : #ifdef CONFIG_BLK_CGROUP
1478 : /**
1479 : * kthread_associate_blkcg - associate blkcg to current kthread
1480 : * @css: the cgroup info
1481 : *
1482 : * Current thread must be a kthread. The thread is running jobs on behalf of
1483 : * other threads. In some cases, we expect the jobs to attach the cgroup info of
1484 : * the original threads instead of that of the current thread. This function stores
1485 : * the original thread's cgroup info in the current kthread's context for later
1486 : * retrieval.
1487 : */
1488 : void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1489 : {
1490 : struct kthread *kthread;
1491 :
1492 : if (!(current->flags & PF_KTHREAD))
1493 : return;
1494 : kthread = to_kthread(current);
1495 : if (!kthread)
1496 : return;
1497 :
1498 : if (kthread->blkcg_css) {
1499 : css_put(kthread->blkcg_css);
1500 : kthread->blkcg_css = NULL;
1501 : }
1502 : if (css) {
1503 : css_get(css);
1504 : kthread->blkcg_css = css;
1505 : }
1506 : }
1507 : EXPORT_SYMBOL(kthread_associate_blkcg);
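/*
 * Illustrative sketch (not part of this file): a kthread doing I/O on
 * behalf of another task adopts that task's blkcg for the duration so the
 * I/O is charged to the right cgroup. origin_css is a hypothetical css
 * pointer saved by the submitting task.
 */
static void example_submit_on_behalf(struct cgroup_subsys_state *origin_css)
{
	kthread_associate_blkcg(origin_css);
	/* ... submit_bio() etc. on behalf of the original task ... */
	kthread_associate_blkcg(NULL);	/* drop the association */
}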
1508 :
1509 : /**
1510 : * kthread_blkcg - get associated blkcg css of current kthread
1511 : *
1512 : * Current thread must be a kthread.
1513 : */
1514 : struct cgroup_subsys_state *kthread_blkcg(void)
1515 : {
1516 : struct kthread *kthread;
1517 :
1518 : if (current->flags & PF_KTHREAD) {
1519 : kthread = to_kthread(current);
1520 : if (kthread)
1521 : return kthread->blkcg_css;
1522 : }
1523 : return NULL;
1524 : }
1525 : EXPORT_SYMBOL(kthread_blkcg);
1526 : #endif
|