/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KTHREAD_H
#define _LINUX_KTHREAD_H
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>

/*
 * Create a kthread on the given NUMA node, left in the stopped state
 * (see kthread_create() below).  @namefmt plus the trailing arguments
 * form the thread's name; the format string is compile-time checked
 * via __printf(4, 5).
 */
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[], ...);

/**
 * kthread_create - create a kthread on the current node
 * @threadfn: the function to run in the thread
 * @data: data pointer for @threadfn()
 * @namefmt: printf-style format string for the thread name
 * @arg...: arguments for @namefmt.
 *
 * This macro will create a kthread on the current node, leaving it in
 * the stopped state.  This is just a helper for kthread_create_on_node();
 * see the documentation there for more details.
 */
#define kthread_create(threadfn, data, namefmt, arg...) \
	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)


/*
 * Create a kthread tied to @cpu.  Unlike kthread_create_on_node() this
 * takes no varargs; NOTE(review): @namefmt is presumably expanded with
 * the CPU number by the implementation — confirm in kernel/kthread.c.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data,
					  unsigned int cpu,
					  const char *namefmt);

/**
 * kthread_run - create and wake a thread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: Convenient wrapper for kthread_create() followed by
 * wake_up_process().  Returns the kthread or ERR_PTR(-ENOMEM).
 * On error the thread is never woken, so the error pointer is simply
 * passed through to the caller.
 */
#define kthread_run(threadfn, data, namefmt, ...)			   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})

/*
 * Core kthread lifecycle operations.  All are implemented in
 * kernel/kthread.c; see the kernel-doc there for exact semantics.
 */
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);	/* bind @k to @cpu */
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);		/* polled by @threadfn to honour kthread_stop() */
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);

/* NOTE(review): kthreadd is the kernel thread daemon's entry point and
 * kthreadd_task its task_struct — confirm against kernel/kthread.c. */
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);

/*
 * Simple work processor based on kthread.
 *
 * This provides easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using queue/kthread_flush_work()
 * respectively.  Queued kthread_works are processed by a kthread
 * running kthread_worker_fn().
 */
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
/*
 * Timer callback used by kthread_delayed_work; it is wired to the
 * embedded timer by KTHREAD_DELAYED_WORK_INIT() and
 * kthread_init_delayed_work() below.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t);

/* Flag bits for the @flags argument of kthread_create_worker*(). */
enum {
	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
};

struct kthread_worker {
	unsigned int		flags;		/* KTW_* bits (e.g. KTW_FREEZABLE) */
	raw_spinlock_t		lock;		/* NOTE(review): appears to protect the
						 * lists below — confirm in kthread.c */
	struct list_head	work_list;	/* pending kthread_works */
	struct list_head	delayed_work_list;
	struct task_struct	*task;		/* thread running kthread_worker_fn() */
	struct kthread_work	*current_work;
};

struct kthread_work {
	struct list_head	node;	/* entry in a worker's work list */
	kthread_work_func_t	func;	/* callback invoked to process the work */
	struct kthread_worker	*worker;
	/* Number of canceling calls that are running at the moment. */
	int			canceling;
};

/* A kthread_work plus a timer that queues it after a delay
 * (the timer fires kthread_delayed_work_timer_fn). */
struct kthread_delayed_work {
	struct kthread_work work;
	struct timer_list timer;
};

/* Static initializer for a kthread_worker; fields not listed
 * (flags, task, current_work) are implicitly zeroed. */
#define KTHREAD_WORKER_INIT(worker)	{				\
	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
	.work_list = LIST_HEAD_INIT((worker).work_list),		\
	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
	}

/* Static initializer for a kthread_work; worker/canceling start zeroed. */
#define KTHREAD_WORK_INIT(work, fn)	{				\
	.node = LIST_HEAD_INIT((work).node),				\
	.func = (fn),							\
	}

/* Static initializer for a kthread_delayed_work; the embedded timer is
 * set to kthread_delayed_work_timer_fn with TIMER_IRQSAFE. */
#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,	\
				     TIMER_IRQSAFE),			\
	}

/* Define-and-initialize helpers wrapping the *_INIT macros above. */
#define DEFINE_KTHREAD_WORKER(worker)					\
	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)

#define DEFINE_KTHREAD_WORK(work, fn)					\
	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
	struct kthread_delayed_work dwork =				\
		KTHREAD_DELAYED_WORK_INIT(dwork, fn)

/*
 * kthread_worker.lock needs its own lockdep class key when defined on
 * stack with lockdep enabled.  Use the following macros in such cases.
 */
#ifdef CONFIG_LOCKDEP
/* GNU statement expression: runs kthread_init_worker() (which supplies a
 * per-call-site lock_class_key) and yields the worker as the value. */
# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
	({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif

extern void __kthread_init_worker(struct kthread_worker *worker,
			const char *name, struct lock_class_key *key);

/*
 * Runtime initializer for a kthread_worker.  Each expansion creates one
 * static lock_class_key, giving every call site its own lockdep class;
 * the stringified worker name is used as the lock name.
 */
#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)

/*
 * Runtime initializer for a kthread_work.  The memset also clears the
 * worker pointer and the canceling counter before node/func are set.
 */
#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));		\
		INIT_LIST_HEAD(&(work)->node);				\
		(work)->func = (fn);					\
	} while (0)

/* Runtime initializer for a kthread_delayed_work: initializes the
 * embedded work and arms-up the timer with kthread_delayed_work_timer_fn
 * (TIMER_IRQSAFE), mirroring KTHREAD_DELAYED_WORK_INIT. */
#define kthread_init_delayed_work(dwork, fn)				\
	do {								\
		kthread_init_work(&(dwork)->work, (fn));		\
		timer_setup(&(dwork)->timer,				\
			    kthread_delayed_work_timer_fn,		\
			    TIMER_IRQSAFE);				\
	} while (0)

/* Main loop of a worker thread: processes works queued on the
 * kthread_worker passed as @worker_ptr. */
int kthread_worker_fn(void *worker_ptr);

/* Create a worker (KTW_* @flags); thread name is printf-style. */
__printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);

__printf(3, 4) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...);

/* NOTE(review): the bool results presumably report whether the work was
 * (re)queued — confirm against the kernel-doc in kernel/kthread.c. */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work);

bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay);

bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay);

/* Flush a single work / all works on a worker. */
void kthread_flush_work(struct kthread_work *work);
void kthread_flush_worker(struct kthread_worker *worker);

/* Cancel and wait for completion; see struct kthread_work::canceling. */
bool kthread_cancel_work_sync(struct kthread_work *work);
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);

void kthread_destroy_worker(struct kthread_worker *worker);

struct cgroup_subsys_state;

#ifdef CONFIG_BLK_CGROUP
/* Associate the current kthread with a block cgroup / query that
 * association; real implementations live in kernel/kthread.c. */
void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
/* Without CONFIG_BLK_CGROUP these degenerate to no-ops: associating is
 * ignored and no blkcg is ever reported. */
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
}

static inline struct cgroup_subsys_state *kthread_blkcg(void)
{
	return NULL;
}
#endif
#endif /* _LINUX_KTHREAD_H */