/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>
void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);
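/*
 * Example: callers normally reach __init_kthread_worker() through the
 * init_kthread_worker() wrapper in <linux/kthread.h>, which supplies a
 * lockdep class key and name automatically.  A minimal sketch (the
 * my_worker identifier is illustrative only):
 *
 *	static struct kthread_worker my_worker;
 *
 *	init_kthread_worker(&my_worker);
 */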
/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call this
 * function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		/* detach from the worker and exit */
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	/* grab the first queued work item, if any */
	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
		work->done_seq = work->queue_seq;
		smp_mb();	/* mb worker-b1 paired with flush-b0 */
		if (atomic_read(&work->flushing))
			wake_up_all(&work->done);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
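/*
 * Example: a minimal sketch of attaching a kthread to a worker, as
 * described in the comment above (my_worker and the thread name are
 * illustrative only):
 *
 *	struct task_struct *task;
 *
 *	init_kthread_worker(&my_worker);
 *	task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 */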
/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  @worker must have been
 * initialized and, for the work to be processed, have a kthread
 * attached via kthread_worker_fn().  Returns %true if @work was
 * successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &worker->work_list);
		work->queue_seq++;
		if (likely(worker->task))
			wake_up_process(worker->task);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
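/*
 * Example: a minimal sketch of defining a work item and queueing it on
 * the worker from the sketch above (my_work_fn and my_work are
 * illustrative only):
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		pr_info("running on the worker kthread\n");
 *	}
 *
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *	queue_kthread_work(&my_worker, &my_work);
 */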
/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	int seq = work->queue_seq;

	atomic_inc(&work->flushing);

	/*
	 * mb flush-b0 paired with worker-b1, to make sure either
	 * worker sees the above increment or we see done_seq update.
	 */
	smp_mb__after_atomic_inc();

	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
	wait_event(work->done, seq - work->done_seq <= 0);
	atomic_dec(&work->flushing);

	/*
	 * rmb flush-b1 paired with worker-b0, to make sure our caller
	 * sees every change made by work->func().
	 */
	smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
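/*
 * Example: waiting for a previously queued work item to finish
 * (my_worker and my_work as in the sketches above):
 *
 *	queue_kthread_work(&my_worker, &my_work);
 *	flush_kthread_work(&my_work);
 *
 * Once flush_kthread_work() returns, the barrier pairing above
 * guarantees the caller sees every change made by my_work_fn().
 */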
struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}
/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
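/*
 * Example: a minimal teardown sketch, draining the worker before
 * stopping its kthread (my_worker and task as in the sketches above):
 *
 *	flush_kthread_worker(&my_worker);
 *	kthread_stop(task);
 */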