]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_KTHREAD_H |
3 | #define _LINUX_KTHREAD_H | |
4 | /* Simple interface for creating and stopping kernel threads without mess. */ | |
5 | #include <linux/err.h> | |
6 | #include <linux/sched.h> | |
7 | ||
9bf5b9eb CH |
8 | struct mm_struct; |
9 | ||
b9075fa9 | 10 | __printf(4, 5) |
207205a2 ED |
11 | struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), |
12 | void *data, | |
13 | int node, | |
b9075fa9 | 14 | const char namefmt[], ...); |
207205a2 | 15 | |
e154ccc8 JC |
16 | /** |
17 | * kthread_create - create a kthread on the current node | |
18 | * @threadfn: the function to run in the thread | |
19 | * @data: data pointer for @threadfn() | |
20 | * @namefmt: printf-style format string for the thread name | |
d16977f3 | 21 | * @arg...: arguments for @namefmt. |
e154ccc8 JC |
22 | * |
23 | * This macro will create a kthread on the current node, leaving it in | |
24 | * the stopped state. This is just a helper for kthread_create_on_node(); | |
25 | * see the documentation there for more details. | |
26 | */ | |
207205a2 | 27 | #define kthread_create(threadfn, data, namefmt, arg...) \ |
e9f06986 | 28 | kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg) |
207205a2 | 29 | |
1da177e4 | 30 | |
2a1d4460 TG |
31 | struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), |
32 | void *data, | |
33 | unsigned int cpu, | |
34 | const char *namefmt); | |
35 | ||
ac687e6e PZ |
36 | void kthread_set_per_cpu(struct task_struct *k, int cpu); |
37 | bool kthread_is_per_cpu(struct task_struct *k); | |
38 | ||
1da177e4 | 39 | /** |
9e37bd30 | 40 | * kthread_run - create and wake a thread. |
1da177e4 LT |
41 | * @threadfn: the function to run until signal_pending(current). |
42 | * @data: data ptr for @threadfn. | |
43 | * @namefmt: printf-style name for the thread. | |
44 | * | |
45 | * Description: Convenient wrapper for kthread_create() followed by | |
9e37bd30 RD |
46 | * wake_up_process(). Returns the kthread or ERR_PTR(-ENOMEM). |
47 | */ | |
1da177e4 LT |
48 | #define kthread_run(threadfn, data, namefmt, ...) \ |
49 | ({ \ | |
50 | struct task_struct *__k \ | |
51 | = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \ | |
52 | if (!IS_ERR(__k)) \ | |
53 | wake_up_process(__k); \ | |
54 | __k; \ | |
55 | }) | |
56 | ||
1da5c46f | 57 | void free_kthread_struct(struct task_struct *k); |
1da177e4 | 58 | void kthread_bind(struct task_struct *k, unsigned int cpu); |
25834c73 | 59 | void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); |
1da177e4 | 60 | int kthread_stop(struct task_struct *k); |
2a1d4460 TG |
61 | bool kthread_should_stop(void); |
62 | bool kthread_should_park(void); | |
0121805d | 63 | bool __kthread_should_park(struct task_struct *k); |
8a32c441 | 64 | bool kthread_freezable_should_stop(bool *was_frozen); |
52782c92 | 65 | void *kthread_func(struct task_struct *k); |
82805ab7 | 66 | void *kthread_data(struct task_struct *k); |
e700591a | 67 | void *kthread_probe_data(struct task_struct *k); |
2a1d4460 TG |
68 | int kthread_park(struct task_struct *k); |
69 | void kthread_unpark(struct task_struct *k); | |
70 | void kthread_parkme(void); | |
1da177e4 | 71 | |
73c27992 EB |
72 | int kthreadd(void *unused); |
73 | extern struct task_struct *kthreadd_task; | |
207205a2 | 74 | extern int tsk_fork_get_node(struct task_struct *tsk); |
73c27992 | 75 | |
b56c0d89 TH |
76 | /* |
77 | * Simple work processor based on kthread. | |
78 | * | |
79 | * This provides easier way to make use of kthreads. A kthread_work | |
3989144f | 80 | * can be queued and flushed using queue/kthread_flush_work() |
b56c0d89 TH |
81 | * respectively. Queued kthread_works are processed by a kthread |
82 | * running kthread_worker_fn(). | |
b56c0d89 TH |
83 | */ |
84 | struct kthread_work; | |
85 | typedef void (*kthread_work_func_t)(struct kthread_work *work); | |
fe5c3b69 | 86 | void kthread_delayed_work_timer_fn(struct timer_list *t); |
b56c0d89 | 87 | |
dbf52682 PM |
88 | enum { |
89 | KTW_FREEZABLE = 1 << 0, /* freeze during suspend */ | |
90 | }; | |
91 | ||
b56c0d89 | 92 | struct kthread_worker { |
dbf52682 | 93 | unsigned int flags; |
fe99a4f4 | 94 | raw_spinlock_t lock; |
b56c0d89 | 95 | struct list_head work_list; |
22597dc3 | 96 | struct list_head delayed_work_list; |
b56c0d89 | 97 | struct task_struct *task; |
46f3d976 | 98 | struct kthread_work *current_work; |
b56c0d89 TH |
99 | }; |
100 | ||
101 | struct kthread_work { | |
102 | struct list_head node; | |
103 | kthread_work_func_t func; | |
46f3d976 | 104 | struct kthread_worker *worker; |
37be45d4 PM |
105 | /* Number of canceling calls that are running at the moment. */ |
106 | int canceling; | |
b56c0d89 TH |
107 | }; |
108 | ||
22597dc3 PM |
109 | struct kthread_delayed_work { |
110 | struct kthread_work work; | |
111 | struct timer_list timer; | |
112 | }; | |
113 | ||
b56c0d89 | 114 | #define KTHREAD_WORKER_INIT(worker) { \ |
fe99a4f4 | 115 | .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ |
b56c0d89 | 116 | .work_list = LIST_HEAD_INIT((worker).work_list), \ |
22597dc3 | 117 | .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ |
b56c0d89 TH |
118 | } |
119 | ||
120 | #define KTHREAD_WORK_INIT(work, fn) { \ | |
121 | .node = LIST_HEAD_INIT((work).node), \ | |
122 | .func = (fn), \ | |
b56c0d89 TH |
123 | } |
124 | ||
22597dc3 PM |
125 | #define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \ |
126 | .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \ | |
841b86f3 | 127 | .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\ |
22597dc3 PM |
128 | TIMER_IRQSAFE), \ |
129 | } | |
130 | ||
b56c0d89 TH |
131 | #define DEFINE_KTHREAD_WORKER(worker) \ |
132 | struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) | |
133 | ||
134 | #define DEFINE_KTHREAD_WORK(work, fn) \ | |
135 | struct kthread_work work = KTHREAD_WORK_INIT(work, fn) | |
136 | ||
22597dc3 PM |
137 | #define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn) \ |
138 | struct kthread_delayed_work dwork = \ | |
139 | KTHREAD_DELAYED_WORK_INIT(dwork, fn) | |
140 | ||
4f32e9b1 | 141 | /* |
95847e1b LJ |
142 | * kthread_worker.lock needs its own lockdep class key when defined on |
143 | * stack with lockdep enabled. Use the following macros in such cases. | |
4f32e9b1 YZ |
144 | */ |
145 | #ifdef CONFIG_LOCKDEP | |
146 | # define KTHREAD_WORKER_INIT_ONSTACK(worker) \ | |
3989144f | 147 | ({ kthread_init_worker(&worker); worker; }) |
4f32e9b1 YZ |
148 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ |
149 | struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) | |
4f32e9b1 YZ |
150 | #else |
151 | # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) | |
4f32e9b1 YZ |
152 | #endif |
153 | ||
3989144f | 154 | extern void __kthread_init_worker(struct kthread_worker *worker, |
4f32e9b1 YZ |
155 | const char *name, struct lock_class_key *key); |
156 | ||
3989144f | 157 | #define kthread_init_worker(worker) \ |
4f32e9b1 YZ |
158 | do { \ |
159 | static struct lock_class_key __key; \ | |
3989144f | 160 | __kthread_init_worker((worker), "("#worker")->lock", &__key); \ |
4f32e9b1 YZ |
161 | } while (0) |
162 | ||
3989144f | 163 | #define kthread_init_work(work, fn) \ |
4f32e9b1 YZ |
164 | do { \ |
165 | memset((work), 0, sizeof(struct kthread_work)); \ | |
166 | INIT_LIST_HEAD(&(work)->node); \ | |
167 | (work)->func = (fn); \ | |
4f32e9b1 | 168 | } while (0) |
b56c0d89 | 169 | |
22597dc3 PM |
170 | #define kthread_init_delayed_work(dwork, fn) \ |
171 | do { \ | |
172 | kthread_init_work(&(dwork)->work, (fn)); \ | |
ad01423a | 173 | timer_setup(&(dwork)->timer, \ |
98c985d7 PM |
174 | kthread_delayed_work_timer_fn, \ |
175 | TIMER_IRQSAFE); \ | |
22597dc3 PM |
176 | } while (0) |
177 | ||
b56c0d89 TH |
178 | int kthread_worker_fn(void *worker_ptr); |
179 | ||
dbf52682 | 180 | __printf(2, 3) |
fbae2d44 | 181 | struct kthread_worker * |
dbf52682 | 182 | kthread_create_worker(unsigned int flags, const char namefmt[], ...); |
fbae2d44 | 183 | |
c0b942a7 | 184 | __printf(3, 4) struct kthread_worker * |
dbf52682 PM |
185 | kthread_create_worker_on_cpu(int cpu, unsigned int flags, |
186 | const char namefmt[], ...); | |
fbae2d44 | 187 | |
3989144f | 188 | bool kthread_queue_work(struct kthread_worker *worker, |
b56c0d89 | 189 | struct kthread_work *work); |
22597dc3 PM |
190 | |
191 | bool kthread_queue_delayed_work(struct kthread_worker *worker, | |
192 | struct kthread_delayed_work *dwork, | |
193 | unsigned long delay); | |
194 | ||
9a6b06c8 PM |
195 | bool kthread_mod_delayed_work(struct kthread_worker *worker, |
196 | struct kthread_delayed_work *dwork, | |
197 | unsigned long delay); | |
198 | ||
3989144f PM |
199 | void kthread_flush_work(struct kthread_work *work); |
200 | void kthread_flush_worker(struct kthread_worker *worker); | |
b56c0d89 | 201 | |
37be45d4 PM |
202 | bool kthread_cancel_work_sync(struct kthread_work *work); |
203 | bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); | |
204 | ||
35033fe9 PM |
205 | void kthread_destroy_worker(struct kthread_worker *worker); |
206 | ||
f5678e7f CH |
207 | void kthread_use_mm(struct mm_struct *mm); |
208 | void kthread_unuse_mm(struct mm_struct *mm); | |
9bf5b9eb | 209 | |
8af0c18a SB |
210 | struct cgroup_subsys_state; |
211 | ||
0b508bc9 | 212 | #ifdef CONFIG_BLK_CGROUP |
05e3db95 SL |
213 | void kthread_associate_blkcg(struct cgroup_subsys_state *css); |
214 | struct cgroup_subsys_state *kthread_blkcg(void); | |
215 | #else | |
216 | static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { } | |
217 | static inline struct cgroup_subsys_state *kthread_blkcg(void) | |
218 | { | |
219 | return NULL; | |
220 | } | |
221 | #endif | |
1da177e4 | 222 | #endif /* _LINUX_KTHREAD_H */ |