kernel/kthread.c (mirror_ubuntu-jammy-kernel.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Kernel thread helper functions.
3 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 * Copyright (C) 2009 Red Hat, Inc.
5 *
6 * Creation is done via kthreadd, so that we get a clean environment
7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
8 * etc.).
9 */
10 #include <uapi/linux/sched/types.h>
11 #include <linux/mm.h>
12 #include <linux/mmu_context.h>
13 #include <linux/sched.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/task.h>
16 #include <linux/kthread.h>
17 #include <linux/completion.h>
18 #include <linux/err.h>
19 #include <linux/cgroup.h>
20 #include <linux/cpuset.h>
21 #include <linux/unistd.h>
22 #include <linux/file.h>
23 #include <linux/export.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <linux/freezer.h>
27 #include <linux/ptrace.h>
28 #include <linux/uaccess.h>
29 #include <linux/numa.h>
30 #include <linux/sched/isolation.h>
31 #include <trace/events/sched.h>
32
33
34 static DEFINE_SPINLOCK(kthread_create_lock);
35 static LIST_HEAD(kthread_create_list);
36 struct task_struct *kthreadd_task;
37
38 struct kthread_create_info
39 {
40 /* Information passed to kthread() from kthreadd. */
41 int (*threadfn)(void *data);
42 void *data;
43 int node;
44
45 /* Result passed back to kthread_create() from kthreadd. */
46 struct task_struct *result;
47 struct completion *done;
48
49 struct list_head list;
50 };
51
52 struct kthread {
53 unsigned long flags;
54 unsigned int cpu;
55 int (*threadfn)(void *);
56 void *data;
57 mm_segment_t oldfs;
58 struct completion parked;
59 struct completion exited;
60 #ifdef CONFIG_BLK_CGROUP
61 struct cgroup_subsys_state *blkcg_css;
62 #endif
63 };
64
65 enum KTHREAD_BITS {
66 KTHREAD_IS_PER_CPU = 0,
67 KTHREAD_SHOULD_STOP,
68 KTHREAD_SHOULD_PARK,
69 };
70
71 static inline void set_kthread_struct(void *kthread)
72 {
73 /*
74 * We abuse ->set_child_tid to avoid adding a new member and because it
75 * can't be wrongly copied by copy_process(). We also rely on the fact
76 * that the caller can't exec, so PF_KTHREAD can't be cleared.
77 */
78 current->set_child_tid = (__force void __user *)kthread;
79 }
80
81 static inline struct kthread *to_kthread(struct task_struct *k)
82 {
83 WARN_ON(!(k->flags & PF_KTHREAD));
84 return (__force void *)k->set_child_tid;
85 }
86
87 /*
88 * Variant of to_kthread() that doesn't assume @p is a kthread.
89 *
90 * By construction, when:
91 *
92 * (p->flags & PF_KTHREAD) && p->set_child_tid
93 *
94 * the task is both a kthread and struct kthread is persistent. However
95 * PF_KTHREAD on its own is not: kernel_thread() can exec() (see umh.c and
96 * begin_new_exec()).
97 */
98 static inline struct kthread *__to_kthread(struct task_struct *p)
99 {
100 void *kthread = (__force void *)p->set_child_tid;
101 if (kthread && !(p->flags & PF_KTHREAD))
102 kthread = NULL;
103 return kthread;
104 }
105
106 void free_kthread_struct(struct task_struct *k)
107 {
108 struct kthread *kthread;
109
110 /*
111 * Can be NULL if this kthread was created by kernel_thread()
112 * or if kzalloc() in kthread() failed.
113 */
114 kthread = to_kthread(k);
115 #ifdef CONFIG_BLK_CGROUP
116 WARN_ON_ONCE(kthread && kthread->blkcg_css);
117 #endif
118 kfree(kthread);
119 }
120
121 /**
122 * kthread_should_stop - should this kthread return now?
123 *
124 * When someone calls kthread_stop() on your kthread, it will be woken
125 * and this will return true. You should then return, and your return
126 * value will be passed through to kthread_stop().
127 */
128 bool kthread_should_stop(void)
129 {
130 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
131 }
132 EXPORT_SYMBOL(kthread_should_stop);
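/*
 * Illustrative sketch, not part of this file: the usual shape of a thread
 * function that cooperates with kthread_stop(). All "example_" identifiers
 * are hypothetical; the loop checks kthread_should_stop() and its return
 * value becomes the return value of kthread_stop().
 */
#if 0	/* example only */
static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work on @data here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;	/* passed back to kthread_stop() */
}
#endif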
133
134 bool __kthread_should_park(struct task_struct *k)
135 {
136 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
137 }
138 EXPORT_SYMBOL_GPL(__kthread_should_park);
139
140 /**
141 * kthread_should_park - should this kthread park now?
142 *
143 * When someone calls kthread_park() on your kthread, it will be woken
144 * and this will return true. You should then do the necessary
145 * cleanup and call kthread_parkme()
146 *
147 * Similar to kthread_should_stop(), but this keeps the thread alive
148 * and in a park position. kthread_unpark() "restarts" the thread and
149 * calls the thread function again.
150 */
151 bool kthread_should_park(void)
152 {
153 return __kthread_should_park(current);
154 }
155 EXPORT_SYMBOL_GPL(kthread_should_park);
156
157 /**
158 * kthread_freezable_should_stop - should this freezable kthread return now?
159 * @was_frozen: optional out parameter, indicates whether %current was frozen
160 *
161 * kthread_should_stop() for freezable kthreads, which will enter
162 * the refrigerator if necessary. This function is safe from kthread_stop() /
163 * freezer deadlock and freezable kthreads should use this function instead
164 * of calling try_to_freeze() directly.
165 */
166 bool kthread_freezable_should_stop(bool *was_frozen)
167 {
168 bool frozen = false;
169
170 might_sleep();
171
172 if (unlikely(freezing(current)))
173 frozen = __refrigerator(true);
174
175 if (was_frozen)
176 *was_frozen = frozen;
177
178 return kthread_should_stop();
179 }
180 EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
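/*
 * Illustrative sketch, not part of this file: a freezable kthread loop,
 * using a hypothetical example_freezable_fn(). The thread marks itself
 * freezable and then polls kthread_freezable_should_stop() instead of
 * calling try_to_freeze() directly.
 */
#if 0	/* example only */
static int example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			continue;	/* redo work invalidated by freeze/thaw */
		/* normal work here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif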
181
182 /**
183 * kthread_func - return the function specified on kthread creation
184 * @task: kthread task in question
185 *
186 * Returns NULL if the task is not a kthread.
187 */
188 void *kthread_func(struct task_struct *task)
189 {
190 struct kthread *kthread = __to_kthread(task);
191 if (kthread)
192 return kthread->threadfn;
193 return NULL;
194 }
195 EXPORT_SYMBOL_GPL(kthread_func);
196
197 /**
198 * kthread_data - return data value specified on kthread creation
199 * @task: kthread task in question
200 *
201 * Return the data value specified when kthread @task was created.
202 * The caller is responsible for ensuring the validity of @task when
203 * calling this function.
204 */
205 void *kthread_data(struct task_struct *task)
206 {
207 return to_kthread(task)->data;
208 }
209 EXPORT_SYMBOL_GPL(kthread_data);
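/*
 * Illustrative sketch, not part of this file: the round trip through the
 * @data pointer. The creator hands a private structure to kthread_run()
 * and other code later recovers it from the task with kthread_data(),
 * much like the workqueue code recovers its worker from a task.
 * example_info and example_fn() are hypothetical; error handling omitted.
 */
#if 0	/* example only */
	struct example_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
	struct task_struct *tsk = kthread_run(example_fn, info, "example");

	/* ... elsewhere, given only @tsk ... */
	struct example_info *again = kthread_data(tsk);
#endif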
210
211 /**
212 * kthread_probe_data - speculative version of kthread_data()
213 * @task: possible kthread task in question
214 *
215 * @task could be a kthread task. Return the data value specified when it
216 * was created if accessible. If @task isn't a kthread task or its data is
217 * inaccessible for any reason, %NULL is returned. This function requires
218 * that @task itself is safe to dereference.
219 */
220 void *kthread_probe_data(struct task_struct *task)
221 {
222 struct kthread *kthread = __to_kthread(task);
223 void *data = NULL;
224
225 if (kthread)
226 copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
227 return data;
228 }
229
230 static void __kthread_parkme(struct kthread *self)
231 {
232 for (;;) {
233 /*
234 * TASK_PARKED is a special state; we must serialize against
235 * possible pending wakeups to avoid store-store collisions on
236 * task->state.
237 *
238 * Such a collision might result in the task state
239 * changing from TASK_PARKED and us failing the
240 * wait_task_inactive() in kthread_park().
241 */
242 set_special_state(TASK_PARKED);
243 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
244 break;
245
246 /*
247 * Thread is going to call schedule(), do not preempt it,
248 * or the caller of kthread_park() may spend more time in
249 * wait_task_inactive().
250 */
251 preempt_disable();
252 complete(&self->parked);
253 schedule_preempt_disabled();
254 preempt_enable();
255 }
256 __set_current_state(TASK_RUNNING);
257 }
258
259 void kthread_parkme(void)
260 {
261 __kthread_parkme(to_kthread(current));
262 }
263 EXPORT_SYMBOL_GPL(kthread_parkme);
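/*
 * Illustrative sketch, not part of this file: how a thread function
 * typically honours park requests, e.g. for per-CPU threads quiesced
 * across CPU hotplug. example_percpu_fn() is hypothetical.
 */
#if 0	/* example only */
static int example_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* drop per-CPU resources, then park */
			kthread_parkme();
			continue;
		}
		/* per-CPU work here */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif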
264
265 static int kthread(void *_create)
266 {
267 /* Copy data: it's on kthread's stack */
268 struct kthread_create_info *create = _create;
269 int (*threadfn)(void *data) = create->threadfn;
270 void *data = create->data;
271 struct completion *done;
272 struct kthread *self;
273 int ret;
274
275 self = kzalloc(sizeof(*self), GFP_KERNEL);
276 set_kthread_struct(self);
277
278 /* If user was SIGKILLed, I release the structure. */
279 done = xchg(&create->done, NULL);
280 if (!done) {
281 kfree(create);
282 do_exit(-EINTR);
283 }
284
285 if (!self) {
286 create->result = ERR_PTR(-ENOMEM);
287 complete(done);
288 do_exit(-ENOMEM);
289 }
290
291 self->threadfn = threadfn;
292 self->data = data;
293 init_completion(&self->exited);
294 init_completion(&self->parked);
295 current->vfork_done = &self->exited;
296
297 /* OK, tell user we're spawned, wait for stop or wakeup */
298 __set_current_state(TASK_UNINTERRUPTIBLE);
299 create->result = current;
300 /*
301 * Thread is going to call schedule(), do not preempt it,
302 * or the creator may spend more time in wait_task_inactive().
303 */
304 preempt_disable();
305 complete(done);
306 schedule_preempt_disabled();
307 preempt_enable();
308
309 ret = -EINTR;
310 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
311 cgroup_kthread_ready();
312 __kthread_parkme(self);
313 ret = threadfn(data);
314 }
315 do_exit(ret);
316 }
317
318 /* Called from kernel_clone() to get node information for the task about to be created. */
319 int tsk_fork_get_node(struct task_struct *tsk)
320 {
321 #ifdef CONFIG_NUMA
322 if (tsk == kthreadd_task)
323 return tsk->pref_node_fork;
324 #endif
325 return NUMA_NO_NODE;
326 }
327
328 static void create_kthread(struct kthread_create_info *create)
329 {
330 int pid;
331
332 #ifdef CONFIG_NUMA
333 current->pref_node_fork = create->node;
334 #endif
335 /* We want our own signal handler (we take no signals by default). */
336 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
337 if (pid < 0) {
338 /* If user was SIGKILLed, I release the structure. */
339 struct completion *done = xchg(&create->done, NULL);
340
341 if (!done) {
342 kfree(create);
343 return;
344 }
345 create->result = ERR_PTR(pid);
346 complete(done);
347 }
348 }
349
350 static __printf(4, 0)
351 struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
352 void *data, int node,
353 const char namefmt[],
354 va_list args)
355 {
356 DECLARE_COMPLETION_ONSTACK(done);
357 struct task_struct *task;
358 struct kthread_create_info *create = kmalloc(sizeof(*create),
359 GFP_KERNEL);
360
361 if (!create)
362 return ERR_PTR(-ENOMEM);
363 create->threadfn = threadfn;
364 create->data = data;
365 create->node = node;
366 create->done = &done;
367
368 spin_lock(&kthread_create_lock);
369 list_add_tail(&create->list, &kthread_create_list);
370 spin_unlock(&kthread_create_lock);
371
372 wake_up_process(kthreadd_task);
373 /*
374 * Wait for completion in killable state, for I might be chosen by
375 * the OOM killer while kthreadd is trying to allocate memory for
376 * the new kernel thread.
377 */
378 if (unlikely(wait_for_completion_killable(&done))) {
379 /*
380 * If I was SIGKILLed before kthreadd (or new kernel thread)
381 * calls complete(), leave the cleanup of this structure to
382 * that thread.
383 */
384 if (xchg(&create->done, NULL))
385 return ERR_PTR(-EINTR);
386 /*
387 * kthreadd (or new kernel thread) will call complete()
388 * shortly.
389 */
390 wait_for_completion(&done);
391 }
392 task = create->result;
393 if (!IS_ERR(task)) {
394 static const struct sched_param param = { .sched_priority = 0 };
395 char name[TASK_COMM_LEN];
396
397 /*
398 * task is already visible to other tasks, so updating
399 * COMM must be protected.
400 */
401 vsnprintf(name, sizeof(name), namefmt, args);
402 set_task_comm(task, name);
403 /*
404 * root may have changed our (kthreadd's) priority or CPU mask.
405 * The kernel thread should not inherit these properties.
406 */
407 sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
408 set_cpus_allowed_ptr(task,
409 housekeeping_cpumask(HK_FLAG_KTHREAD));
410 }
411 kfree(create);
412 return task;
413 }
414
415 /**
416 * kthread_create_on_node - create a kthread.
417 * @threadfn: the function to run until signal_pending(current).
418 * @data: data ptr for @threadfn.
419 * @node: task and thread structures for the thread are allocated on this node
420 * @namefmt: printf-style name for the thread.
421 *
422 * Description: This helper function creates and names a kernel
423 * thread. The thread will be stopped: use wake_up_process() to start
424 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
425 * is affine to all CPUs.
426 *
427 * If the thread is going to be bound to a particular cpu, give its node
428 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
429 * When woken, the thread will run @threadfn() with @data as its
430 * argument. @threadfn() can either call do_exit() directly if it is a
431 * standalone thread for which no one will call kthread_stop(), or
432 * return when 'kthread_should_stop()' is true (which means
433 * kthread_stop() has been called). The return value should be zero
434 * or a negative error number; it will be passed to kthread_stop().
435 *
436 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
437 */
438 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
439 void *data, int node,
440 const char namefmt[],
441 ...)
442 {
443 struct task_struct *task;
444 va_list args;
445
446 va_start(args, namefmt);
447 task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
448 va_end(args);
449
450 return task;
451 }
452 EXPORT_SYMBOL(kthread_create_on_node);
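/*
 * Illustrative sketch, not part of this file: creating a thread with
 * kthread_create_on_node() and starting it later with wake_up_process().
 * example_thread_fn() and example_dev are hypothetical; kthread_run()
 * combines the two steps when no setup is needed in between.
 */
#if 0	/* example only */
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_thread_fn, example_dev,
				     NUMA_NO_NODE, "example/%d", example_dev->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	/* optional setup (priority, affinity, ...) goes here */
	wake_up_process(tsk);
#endif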
453
454 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
455 {
456 unsigned long flags;
457
458 if (!wait_task_inactive(p, state)) {
459 WARN_ON(1);
460 return;
461 }
462
463 /* It's safe because the task is inactive. */
464 raw_spin_lock_irqsave(&p->pi_lock, flags);
465 do_set_cpus_allowed(p, mask);
466 p->flags |= PF_NO_SETAFFINITY;
467 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
468 }
469
470 static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
471 {
472 __kthread_bind_mask(p, cpumask_of(cpu), state);
473 }
474
475 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
476 {
477 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
478 }
479
480 /**
481 * kthread_bind - bind a just-created kthread to a cpu.
482 * @p: thread created by kthread_create().
483 * @cpu: cpu (might not be online, must be possible) for @k to run on.
484 *
485 * Description: This function is equivalent to set_cpus_allowed(),
486 * except that @cpu doesn't need to be online, and the thread must be
487 * stopped (i.e., just returned from kthread_create()).
488 */
489 void kthread_bind(struct task_struct *p, unsigned int cpu)
490 {
491 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
492 }
493 EXPORT_SYMBOL(kthread_bind);
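/*
 * Illustrative sketch, not part of this file: binding a just-created,
 * still stopped thread to a CPU before waking it, which is what
 * kthread_create_on_cpu() below does internally. example_fn and
 * example_data are hypothetical.
 */
#if 0	/* example only */
	struct task_struct *tsk;

	tsk = kthread_create(example_fn, example_data, "example/%u", cpu);
	if (!IS_ERR(tsk)) {
		kthread_bind(tsk, cpu);
		wake_up_process(tsk);
	}
#endif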
494
495 /**
496 * kthread_create_on_cpu - Create a cpu bound kthread
497 * @threadfn: the function to run until signal_pending(current).
498 * @data: data ptr for @threadfn.
499 * @cpu: The cpu on which the thread should be bound.
500 * @namefmt: printf-style name for the thread. Format is restricted
501 * to "name.*%u". Code fills in cpu number.
502 *
503 * Description: This helper function creates and names a kernel thread bound to the given cpu.
504 */
505 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
506 void *data, unsigned int cpu,
507 const char *namefmt)
508 {
509 struct task_struct *p;
510
511 p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
512 cpu);
513 if (IS_ERR(p))
514 return p;
515 kthread_bind(p, cpu);
516 /* CPU hotplug needs to bind the thread again when unparking it. */
517 to_kthread(p)->cpu = cpu;
518 return p;
519 }
520
521 void kthread_set_per_cpu(struct task_struct *k, int cpu)
522 {
523 struct kthread *kthread = to_kthread(k);
524 if (!kthread)
525 return;
526
527 WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
528
529 if (cpu < 0) {
530 clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
531 return;
532 }
533
534 kthread->cpu = cpu;
535 set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
536 }
537
538 bool kthread_is_per_cpu(struct task_struct *p)
539 {
540 struct kthread *kthread = __to_kthread(p);
541 if (!kthread)
542 return false;
543
544 return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
545 }
546
547 /**
548 * kthread_unpark - unpark a thread created by kthread_create().
549 * @k: thread created by kthread_create().
550 *
551 * Sets kthread_should_park() for @k to return false and wakes it.
552 * If the thread is marked percpu then it is bound to the
553 * cpu again.
554 */
555 void kthread_unpark(struct task_struct *k)
556 {
557 struct kthread *kthread = to_kthread(k);
558
559 /*
560 * Newly created kthread was parked when the CPU was offline.
561 * The binding was lost and we need to set it again.
562 */
563 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
564 __kthread_bind(k, kthread->cpu, TASK_PARKED);
565
566 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
567 /*
568 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
569 */
570 wake_up_state(k, TASK_PARKED);
571 }
572 EXPORT_SYMBOL_GPL(kthread_unpark);
573
574 /**
575 * kthread_park - park a thread created by kthread_create().
576 * @k: thread created by kthread_create().
577 *
578 * Sets kthread_should_park() for @k to return true, wakes it, and
579 * waits for it to return. This can also be called after kthread_create()
580 * instead of calling wake_up_process(): the thread will park without
581 * calling threadfn().
582 *
583 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
584 * If called by the kthread itself, just the park bit is set.
585 */
586 int kthread_park(struct task_struct *k)
587 {
588 struct kthread *kthread = to_kthread(k);
589
590 if (WARN_ON(k->flags & PF_EXITING))
591 return -ENOSYS;
592
593 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
594 return -EBUSY;
595
596 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
597 if (k != current) {
598 wake_up_process(k);
599 /*
600 * Wait for __kthread_parkme() to complete(), this means we
601 * _will_ have TASK_PARKED and are about to call schedule().
602 */
603 wait_for_completion(&kthread->parked);
604 /*
605 * Now wait for that schedule() to complete and the task to
606 * get scheduled out.
607 */
608 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
609 }
610
611 return 0;
612 }
613 EXPORT_SYMBOL_GPL(kthread_park);
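/*
 * Illustrative sketch, not part of this file: caller-side park/unpark to
 * quiesce a thread while some state is changed underneath it. example_tsk
 * is hypothetical; the thread function must call kthread_parkme() when
 * kthread_should_park() is true (see the sketch after kthread_parkme()).
 */
#if 0	/* example only */
	if (!kthread_park(example_tsk)) {
		/* thread is parked and off the CPU; reconfigure here */
		kthread_unpark(example_tsk);
	}
#endif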
614
615 /**
616 * kthread_stop - stop a thread created by kthread_create().
617 * @k: thread created by kthread_create().
618 *
619 * Sets kthread_should_stop() for @k to return true, wakes it, and
620 * waits for it to exit. This can also be called after kthread_create()
621 * instead of calling wake_up_process(): the thread will exit without
622 * calling threadfn().
623 *
624 * If threadfn() may call do_exit() itself, the caller must ensure
625 * task_struct can't go away.
626 *
627 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
628 * was never called.
629 */
630 int kthread_stop(struct task_struct *k)
631 {
632 struct kthread *kthread;
633 int ret;
634
635 trace_sched_kthread_stop(k);
636
637 get_task_struct(k);
638 kthread = to_kthread(k);
639 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
640 kthread_unpark(k);
641 wake_up_process(k);
642 wait_for_completion(&kthread->exited);
643 ret = k->exit_code;
644 put_task_struct(k);
645
646 trace_sched_kthread_stop_ret(ret);
647 return ret;
648 }
649 EXPORT_SYMBOL(kthread_stop);
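/*
 * Illustrative sketch, not part of this file: stopping a thread and
 * propagating its return value, assuming a hypothetical example->tsk
 * stored when the thread was created.
 */
#if 0	/* example only */
	int err = 0;

	if (example->tsk) {
		err = kthread_stop(example->tsk);
		example->tsk = NULL;
	}
	/* err is threadfn()'s return value, or -EINTR if it never ran */
#endif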
650
651 int kthreadd(void *unused)
652 {
653 struct task_struct *tsk = current;
654
655 /* Setup a clean context for our children to inherit. */
656 set_task_comm(tsk, "kthreadd");
657 ignore_signals(tsk);
658 set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
659 set_mems_allowed(node_states[N_MEMORY]);
660
661 current->flags |= PF_NOFREEZE;
662 cgroup_init_kthreadd();
663
664 for (;;) {
665 set_current_state(TASK_INTERRUPTIBLE);
666 if (list_empty(&kthread_create_list))
667 schedule();
668 __set_current_state(TASK_RUNNING);
669
670 spin_lock(&kthread_create_lock);
671 while (!list_empty(&kthread_create_list)) {
672 struct kthread_create_info *create;
673
674 create = list_entry(kthread_create_list.next,
675 struct kthread_create_info, list);
676 list_del_init(&create->list);
677 spin_unlock(&kthread_create_lock);
678
679 create_kthread(create);
680
681 spin_lock(&kthread_create_lock);
682 }
683 spin_unlock(&kthread_create_lock);
684 }
685
686 return 0;
687 }
688
689 void __kthread_init_worker(struct kthread_worker *worker,
690 const char *name,
691 struct lock_class_key *key)
692 {
693 memset(worker, 0, sizeof(struct kthread_worker));
694 raw_spin_lock_init(&worker->lock);
695 lockdep_set_class_and_name(&worker->lock, key, name);
696 INIT_LIST_HEAD(&worker->work_list);
697 INIT_LIST_HEAD(&worker->delayed_work_list);
698 }
699 EXPORT_SYMBOL_GPL(__kthread_init_worker);
700
701 /**
702 * kthread_worker_fn - kthread function to process kthread_worker
703 * @worker_ptr: pointer to initialized kthread_worker
704 *
705 * This function implements the main loop of a kthread worker. It processes
706 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
707 * is empty.
708 *
709 * The works must not hold any locks or leave preemption or interrupts
710 * disabled when they finish. A safe point for freezing is provided after one
711 * work finishes and before a new one is started.
712 *
713 * Also, a work must not be handled by more than one worker at the same time;
714 * see also kthread_queue_work().
715 */
716 int kthread_worker_fn(void *worker_ptr)
717 {
718 struct kthread_worker *worker = worker_ptr;
719 struct kthread_work *work;
720
721 /*
722 * FIXME: Update the check and remove the assignment when all kthread
723 * worker users are created using kthread_create_worker*() functions.
724 */
725 WARN_ON(worker->task && worker->task != current);
726 worker->task = current;
727
728 if (worker->flags & KTW_FREEZABLE)
729 set_freezable();
730
731 repeat:
732 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
733
734 if (kthread_should_stop()) {
735 __set_current_state(TASK_RUNNING);
736 raw_spin_lock_irq(&worker->lock);
737 worker->task = NULL;
738 raw_spin_unlock_irq(&worker->lock);
739 return 0;
740 }
741
742 work = NULL;
743 raw_spin_lock_irq(&worker->lock);
744 if (!list_empty(&worker->work_list)) {
745 work = list_first_entry(&worker->work_list,
746 struct kthread_work, node);
747 list_del_init(&work->node);
748 }
749 worker->current_work = work;
750 raw_spin_unlock_irq(&worker->lock);
751
752 if (work) {
753 kthread_work_func_t func = work->func;
754 __set_current_state(TASK_RUNNING);
755 trace_sched_kthread_work_execute_start(work);
756 work->func(work);
757 /*
758 * Avoid dereferencing work after this point. The trace
759 * event only cares about the address.
760 */
761 trace_sched_kthread_work_execute_end(work, func);
762 } else if (!freezing(current))
763 schedule();
764
765 try_to_freeze();
766 cond_resched();
767 goto repeat;
768 }
769 EXPORT_SYMBOL_GPL(kthread_worker_fn);
770
771 static __printf(3, 0) struct kthread_worker *
772 __kthread_create_worker(int cpu, unsigned int flags,
773 const char namefmt[], va_list args)
774 {
775 struct kthread_worker *worker;
776 struct task_struct *task;
777 int node = NUMA_NO_NODE;
778
779 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
780 if (!worker)
781 return ERR_PTR(-ENOMEM);
782
783 kthread_init_worker(worker);
784
785 if (cpu >= 0)
786 node = cpu_to_node(cpu);
787
788 task = __kthread_create_on_node(kthread_worker_fn, worker,
789 node, namefmt, args);
790 if (IS_ERR(task))
791 goto fail_task;
792
793 if (cpu >= 0)
794 kthread_bind(task, cpu);
795
796 worker->flags = flags;
797 worker->task = task;
798 wake_up_process(task);
799 return worker;
800
801 fail_task:
802 kfree(worker);
803 return ERR_CAST(task);
804 }
805
806 /**
807 * kthread_create_worker - create a kthread worker
808 * @flags: flags modifying the default behavior of the worker
809 * @namefmt: printf-style name for the kthread worker (task).
810 *
811 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
812 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
813 * when the worker was SIGKILLed.
814 */
815 struct kthread_worker *
816 kthread_create_worker(unsigned int flags, const char namefmt[], ...)
817 {
818 struct kthread_worker *worker;
819 va_list args;
820
821 va_start(args, namefmt);
822 worker = __kthread_create_worker(-1, flags, namefmt, args);
823 va_end(args);
824
825 return worker;
826 }
827 EXPORT_SYMBOL(kthread_create_worker);
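/*
 * Illustrative sketch, not part of this file: creating a worker and
 * queueing a work item on it. example_work_fn() and the example_*
 * variables are hypothetical; kthread_init_work() and kthread_queue_work()
 * are the regular worker API.
 */
#if 0	/* example only */
static struct kthread_worker *example_worker;
static struct kthread_work example_work;

static void example_work_fn(struct kthread_work *work)
{
	/* runs in the worker's kthread context */
}

static int example_init(void)
{
	example_worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	kthread_init_work(&example_work, example_work_fn);
	kthread_queue_work(example_worker, &example_work);
	return 0;
}
#endif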
828
829 /**
830 * kthread_create_worker_on_cpu - create a kthread worker and bind it
831 * to a given CPU and the associated NUMA node.
832 * @cpu: CPU number
833 * @flags: flags modifying the default behavior of the worker
834 * @namefmt: printf-style name for the kthread worker (task).
835 *
836 * Use a valid CPU number if you want to bind the kthread worker
837 * to the given CPU and the associated NUMA node.
838 *
839 * A good practice is to also include the cpu number in the worker name.
840 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
841 *
842 * CPU hotplug:
843 * The kthread worker API is simple and generic. It just provides a way
844 * to create, use, and destroy workers.
845 *
846 * It is up to the API user how to handle CPU hotplug. They have to decide
847 * how to handle pending work items, prevent queuing new ones, and
848 * restore the functionality when the CPU goes off and on. There are a
849 * few catches:
850 *
851 * - CPU affinity is lost when the bound CPU goes offline and the worker gets scheduled elsewhere.
852 *
853 * - The worker might not exist if the CPU was offline when the user
854 *   created the workers.
855 *
856 * Good practice is to implement two CPU hotplug callbacks and to
857 * destroy/create the worker when the CPU goes down/up.
858 *
859 * Return:
860 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
861 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
862 * when the worker was SIGKILLed.
863 */
864 struct kthread_worker *
865 kthread_create_worker_on_cpu(int cpu, unsigned int flags,
866 const char namefmt[], ...)
867 {
868 struct kthread_worker *worker;
869 va_list args;
870
871 va_start(args, namefmt);
872 worker = __kthread_create_worker(cpu, flags, namefmt, args);
873 va_end(args);
874
875 return worker;
876 }
877 EXPORT_SYMBOL(kthread_create_worker_on_cpu);
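/*
 * Illustrative sketch, not part of this file, of the CPU hotplug practice
 * described above: create and destroy the per-CPU worker from hotplug
 * callbacks (e.g. registered with cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...)).
 * example_online(), example_offline() and example_workers[] are hypothetical.
 */
#if 0	/* example only */
static struct kthread_worker *example_workers[NR_CPUS];

static int example_online(unsigned int cpu)
{
	struct kthread_worker *w;

	w = kthread_create_worker_on_cpu(cpu, 0, "example/%u", cpu);
	if (IS_ERR(w))
		return PTR_ERR(w);
	example_workers[cpu] = w;
	return 0;
}

static int example_offline(unsigned int cpu)
{
	/* deal with pending items, then tear the worker down */
	kthread_destroy_worker(example_workers[cpu]);
	example_workers[cpu] = NULL;
	return 0;
}
#endif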
878
879 /*
880 * Returns true when the work could not be queued at the moment.
881 * It happens when it is already pending in a worker list
882 * or when it is being cancelled.
883 */
884 static inline bool queuing_blocked(struct kthread_worker *worker,
885 struct kthread_work *work)
886 {
887 lockdep_assert_held(&worker->lock);
888
889 return !list_empty(&work->node) || work->canceling;
890 }
891
892 static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
893 struct kthread_work *work)
894 {
895 lockdep_assert_held(&worker->lock);
896 WARN_ON_ONCE(!list_empty(&work->node));
897 /* Do not use a work with >1 worker, see kthread_queue_work() */
898 WARN_ON_ONCE(work->worker && work->worker != worker);
899 }
900
901 /* insert @work before @pos in @worker */
902 static void kthread_insert_work(struct kthread_worker *worker,
903 struct kthread_work *work,
904 struct list_head *pos)
905 {
906 kthread_insert_work_sanity_check(worker, work);
907
908 trace_sched_kthread_work_queue_work(worker, work);
909
910 list_add_tail(&work->node, pos);
911 work->worker = worker;
912 if (!worker->current_work && likely(worker->task))
913 wake_up_process(worker->task);
914 }
915
916 /**
917 * kthread_queue_work - queue a kthread_work
918 * @worker: target kthread_worker
919 * @work: kthread_work to queue
920 *
921 * Queue @work on worker @worker for async execution. @worker
922 * must have been created with kthread_create_worker(). Returns %true
923 * if @work was successfully queued, %false if it was already pending.
924 *
925 * Reinitialize the work if it needs to be used by another worker.
926 * For example, when the worker was stopped and started again.
927 */
928 bool kthread_queue_work(struct kthread_worker *worker,
929 struct kthread_work *work)
930 {
931 bool ret = false;
932 unsigned long flags;
933
934 raw_spin_lock_irqsave(&worker->lock, flags);
935 if (!queuing_blocked(worker, work)) {
936 kthread_insert_work(worker, work, &worker->work_list);
937 ret = true;
938 }
939 raw_spin_unlock_irqrestore(&worker->lock, flags);
940 return ret;
941 }
942 EXPORT_SYMBOL_GPL(kthread_queue_work);
943
944 /**
945 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
946 * delayed work when the timer expires.
947 * @t: pointer to the expired timer
948 *
949 * The format of the function is defined by struct timer_list.
950 * It is called from an irqsafe timer with interrupts already disabled.
951 */
952 void kthread_delayed_work_timer_fn(struct timer_list *t)
953 {
954 struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
955 struct kthread_work *work = &dwork->work;
956 struct kthread_worker *worker = work->worker;
957 unsigned long flags;
958
959 /*
960 * This might happen when a pending work is reinitialized.
961 * It means that the work is being used incorrectly.
962 */
963 if (WARN_ON_ONCE(!worker))
964 return;
965
966 raw_spin_lock_irqsave(&worker->lock, flags);
967 /* Work must not be used with >1 worker, see kthread_queue_work(). */
968 WARN_ON_ONCE(work->worker != worker);
969
970 /* Move the work from worker->delayed_work_list. */
971 WARN_ON_ONCE(list_empty(&work->node));
972 list_del_init(&work->node);
973 if (!work->canceling)
974 kthread_insert_work(worker, work, &worker->work_list);
975
976 raw_spin_unlock_irqrestore(&worker->lock, flags);
977 }
978 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
979
980 static void __kthread_queue_delayed_work(struct kthread_worker *worker,
981 struct kthread_delayed_work *dwork,
982 unsigned long delay)
983 {
984 struct timer_list *timer = &dwork->timer;
985 struct kthread_work *work = &dwork->work;
986
987 WARN_ON_FUNCTION_MISMATCH(timer->function,
988 kthread_delayed_work_timer_fn);
989
990 /*
991 * If @delay is 0, queue @dwork->work immediately. This is for
992 * both optimization and correctness. The earliest @timer can
993 * expire is on the closest next tick and delayed_work users depend
994 * on that there's no such delay when @delay is 0.
995 */
996 if (!delay) {
997 kthread_insert_work(worker, work, &worker->work_list);
998 return;
999 }
1000
1001 /* Be paranoid and try to detect possible races already now. */
1002 kthread_insert_work_sanity_check(worker, work);
1003
1004 list_add(&work->node, &worker->delayed_work_list);
1005 work->worker = worker;
1006 timer->expires = jiffies + delay;
1007 add_timer(timer);
1008 }
1009
1010 /**
1011 * kthread_queue_delayed_work - queue the associated kthread work
1012 * after a delay.
1013 * @worker: target kthread_worker
1014 * @dwork: kthread_delayed_work to queue
1015 * @delay: number of jiffies to wait before queuing
1016 *
1017 * If the work has not been pending it starts a timer that will queue
1018 * the work after the given @delay. If @delay is zero, it queues the
1019 * work immediately.
1020 *
1021 * Return: %false if @work was already pending. It means that
1022 * either the timer was running or the work was queued. It returns %true
1023 * otherwise.
1024 */
1025 bool kthread_queue_delayed_work(struct kthread_worker *worker,
1026 struct kthread_delayed_work *dwork,
1027 unsigned long delay)
1028 {
1029 struct kthread_work *work = &dwork->work;
1030 unsigned long flags;
1031 bool ret = false;
1032
1033 raw_spin_lock_irqsave(&worker->lock, flags);
1034
1035 if (!queuing_blocked(worker, work)) {
1036 __kthread_queue_delayed_work(worker, dwork, delay);
1037 ret = true;
1038 }
1039
1040 raw_spin_unlock_irqrestore(&worker->lock, flags);
1041 return ret;
1042 }
1043 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
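/*
 * Illustrative sketch, not part of this file: arming a delayed work item.
 * example_dwork and example_timeout_fn() are hypothetical;
 * kthread_init_delayed_work() wires kthread_delayed_work_timer_fn() into
 * the embedded timer.
 */
#if 0	/* example only */
static struct kthread_delayed_work example_dwork;

static void example_arm(struct kthread_worker *worker)
{
	kthread_init_delayed_work(&example_dwork, example_timeout_fn);
	/* run example_timeout_fn() on @worker in roughly one second */
	kthread_queue_delayed_work(worker, &example_dwork, HZ);
}
#endif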
1044
1045 struct kthread_flush_work {
1046 struct kthread_work work;
1047 struct completion done;
1048 };
1049
1050 static void kthread_flush_work_fn(struct kthread_work *work)
1051 {
1052 struct kthread_flush_work *fwork =
1053 container_of(work, struct kthread_flush_work, work);
1054 complete(&fwork->done);
1055 }
1056
1057 /**
1058 * kthread_flush_work - flush a kthread_work
1059 * @work: work to flush
1060 *
1061 * If @work is queued or executing, wait for it to finish execution.
1062 */
1063 void kthread_flush_work(struct kthread_work *work)
1064 {
1065 struct kthread_flush_work fwork = {
1066 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1067 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1068 };
1069 struct kthread_worker *worker;
1070 bool noop = false;
1071
1072 worker = work->worker;
1073 if (!worker)
1074 return;
1075
1076 raw_spin_lock_irq(&worker->lock);
1077 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1078 WARN_ON_ONCE(work->worker != worker);
1079
1080 if (!list_empty(&work->node))
1081 kthread_insert_work(worker, &fwork.work, work->node.next);
1082 else if (worker->current_work == work)
1083 kthread_insert_work(worker, &fwork.work,
1084 worker->work_list.next);
1085 else
1086 noop = true;
1087
1088 raw_spin_unlock_irq(&worker->lock);
1089
1090 if (!noop)
1091 wait_for_completion(&fwork.done);
1092 }
1093 EXPORT_SYMBOL_GPL(kthread_flush_work);
1094
1095 /*
1096 * This function removes the work from the worker queue. Also it makes sure
1097 * that it won't get queued later via the delayed work's timer.
1098 *
1099 * The work might still be in use when this function finishes. See the
1100 * current_work being processed by the worker.
1101 *
1102 * Return: %true if @work was pending and successfully canceled,
1103 * %false if @work was not pending
1104 */
1105 static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
1106 unsigned long *flags)
1107 {
1108 /* Try to cancel the timer if exists. */
1109 if (is_dwork) {
1110 struct kthread_delayed_work *dwork =
1111 container_of(work, struct kthread_delayed_work, work);
1112 struct kthread_worker *worker = work->worker;
1113
1114 /*
1115 * del_timer_sync() must be called to make sure that the timer
1116 * callback is not running. The lock must be temporarily released
1117 * to avoid a deadlock with the callback. In the meantime,
1118 * any queuing is blocked by setting the canceling counter.
1119 */
1120 work->canceling++;
1121 raw_spin_unlock_irqrestore(&worker->lock, *flags);
1122 del_timer_sync(&dwork->timer);
1123 raw_spin_lock_irqsave(&worker->lock, *flags);
1124 work->canceling--;
1125 }
1126
1127 /*
1128 * Try to remove the work from a worker list. It might either
1129 * be from worker->work_list or from worker->delayed_work_list.
1130 */
1131 if (!list_empty(&work->node)) {
1132 list_del_init(&work->node);
1133 return true;
1134 }
1135
1136 return false;
1137 }
1138
1139 /**
1140 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1141 * @worker: kthread worker to use
1142 * @dwork: kthread delayed work to queue
1143 * @delay: number of jiffies to wait before queuing
1144 *
1145 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1146 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1147 * @work is guaranteed to be queued immediately.
1148 *
1149 * Return: %true if @dwork was pending and its timer was modified,
1150 * %false otherwise.
1151 *
1152 * A special case is when the work is being canceled in parallel.
1153 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1154 * or yet another kthread_mod_delayed_work() call. We let the other command
1155 * win and return %false here. The caller is supposed to synchronize these
1156 * operations in a reasonable way.
1157 *
1158 * This function is safe to call from any context including IRQ handler.
1159 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1160 * for details.
1161 */
1162 bool kthread_mod_delayed_work(struct kthread_worker *worker,
1163 struct kthread_delayed_work *dwork,
1164 unsigned long delay)
1165 {
1166 struct kthread_work *work = &dwork->work;
1167 unsigned long flags;
1168 int ret = false;
1169
1170 raw_spin_lock_irqsave(&worker->lock, flags);
1171
1172 /* Do not bother with canceling when never queued. */
1173 if (!work->worker)
1174 goto fast_queue;
1175
1176 /* Work must not be used with >1 worker, see kthread_queue_work() */
1177 WARN_ON_ONCE(work->worker != worker);
1178
1179 /* Do not fight with another command that is canceling this work. */
1180 if (work->canceling)
1181 goto out;
1182
1183 ret = __kthread_cancel_work(work, true, &flags);
1184 fast_queue:
1185 __kthread_queue_delayed_work(worker, dwork, delay);
1186 out:
1187 raw_spin_unlock_irqrestore(&worker->lock, flags);
1188 return ret;
1189 }
1190 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
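/*
 * Illustrative sketch, not part of this file: a watchdog-like pattern that
 * re-arms a delayed work on every sign of activity. Because
 * kthread_mod_delayed_work() modifies an already pending timer, repeated
 * calls simply push the expiry further out. example_worker and
 * example_dwork are hypothetical.
 */
#if 0	/* example only */
static void example_touch_watchdog(void)
{
	/* queue if idle, or move the pending expiry ~5 seconds out */
	kthread_mod_delayed_work(example_worker, &example_dwork, 5 * HZ);
}
#endif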
1191
1192 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1193 {
1194 struct kthread_worker *worker = work->worker;
1195 unsigned long flags;
1196 int ret = false;
1197
1198 if (!worker)
1199 goto out;
1200
1201 raw_spin_lock_irqsave(&worker->lock, flags);
1202 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1203 WARN_ON_ONCE(work->worker != worker);
1204
1205 ret = __kthread_cancel_work(work, is_dwork, &flags);
1206
1207 if (worker->current_work != work)
1208 goto out_fast;
1209
1210 /*
1211 * The work is in progress and we need to wait with the lock released.
1212 * In the meantime, block any queuing by setting the canceling counter.
1213 */
1214 work->canceling++;
1215 raw_spin_unlock_irqrestore(&worker->lock, flags);
1216 kthread_flush_work(work);
1217 raw_spin_lock_irqsave(&worker->lock, flags);
1218 work->canceling--;
1219
1220 out_fast:
1221 raw_spin_unlock_irqrestore(&worker->lock, flags);
1222 out:
1223 return ret;
1224 }
1225
1226 /**
1227 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1228 * @work: the kthread work to cancel
1229 *
1230 * Cancel @work and wait for its execution to finish. This function
1231 * can be used even if the work re-queues itself. On return from this
1232 * function, @work is guaranteed to be not pending or executing on any CPU.
1233 *
1234 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1235 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1236 *
1237 * The caller must ensure that the worker on which @work was last
1238 * queued can't be destroyed before this function returns.
1239 *
1240 * Return: %true if @work was pending, %false otherwise.
1241 */
1242 bool kthread_cancel_work_sync(struct kthread_work *work)
1243 {
1244 return __kthread_cancel_work_sync(work, false);
1245 }
1246 EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1247
1248 /**
1249 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1250 * wait for it to finish.
1251 * @dwork: the kthread delayed work to cancel
1252 *
1253 * This is kthread_cancel_work_sync() for delayed works.
1254 *
1255 * Return: %true if @dwork was pending, %false otherwise.
1256 */
1257 bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1258 {
1259 return __kthread_cancel_work_sync(&dwork->work, true);
1260 }
1261 EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1262
1263 /**
1264 * kthread_flush_worker - flush all current works on a kthread_worker
1265 * @worker: worker to flush
1266 *
1267 * Wait until all currently executing or pending works on @worker are
1268 * finished.
1269 */
1270 void kthread_flush_worker(struct kthread_worker *worker)
1271 {
1272 struct kthread_flush_work fwork = {
1273 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1274 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1275 };
1276
1277 kthread_queue_work(worker, &fwork.work);
1278 wait_for_completion(&fwork.done);
1279 }
1280 EXPORT_SYMBOL_GPL(kthread_flush_worker);
1281
1282 /**
1283 * kthread_destroy_worker - destroy a kthread worker
1284 * @worker: worker to be destroyed
1285 *
1286 * Flush and destroy @worker. The simple flush is enough because the kthread
1287 * worker API is used only in trivial scenarios. There are no multi-step state
1288 * machines needed.
1289 */
1290 void kthread_destroy_worker(struct kthread_worker *worker)
1291 {
1292 struct task_struct *task;
1293
1294 task = worker->task;
1295 if (WARN_ON(!task))
1296 return;
1297
1298 kthread_flush_worker(worker);
1299 kthread_stop(task);
1300 WARN_ON(!list_empty(&worker->work_list));
1301 kfree(worker);
1302 }
1303 EXPORT_SYMBOL(kthread_destroy_worker);
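/*
 * Illustrative sketch, not part of this file: a typical teardown order for
 * the hypothetical example_* objects from the sketches above. Cancel
 * outstanding work first, then destroy the worker.
 */
#if 0	/* example only */
static void example_exit(void)
{
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_cancel_work_sync(&example_work);
	kthread_destroy_worker(example_worker);
}
#endif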
1304
1305 /**
1306 * kthread_use_mm - make the calling kthread operate on an address space
1307 * @mm: address space to operate on
1308 */
1309 void kthread_use_mm(struct mm_struct *mm)
1310 {
1311 struct mm_struct *active_mm;
1312 struct task_struct *tsk = current;
1313
1314 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1315 WARN_ON_ONCE(tsk->mm);
1316
1317 task_lock(tsk);
1318 /* Hold off tlb flush IPIs while switching mm's */
1319 local_irq_disable();
1320 active_mm = tsk->active_mm;
1321 if (active_mm != mm) {
1322 mmgrab(mm);
1323 tsk->active_mm = mm;
1324 }
1325 tsk->mm = mm;
1326 membarrier_update_current_mm(mm);
1327 switch_mm_irqs_off(active_mm, mm, tsk);
1328 local_irq_enable();
1329 task_unlock(tsk);
1330 #ifdef finish_arch_post_lock_switch
1331 finish_arch_post_lock_switch();
1332 #endif
1333
1334 /*
1335 * When a kthread starts operating on an address space, the loop
1336 * in membarrier_{private,global}_expedited() may not observe
1337 * that tsk->mm, and not issue an IPI. Membarrier requires a
1338 * memory barrier after storing to tsk->mm, before accessing
1339 * user-space memory. A full memory barrier for membarrier
1340 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1341 * mmdrop(), or explicitly with smp_mb().
1342 */
1343 if (active_mm != mm)
1344 mmdrop(active_mm);
1345 else
1346 smp_mb();
1347
1348 to_kthread(tsk)->oldfs = force_uaccess_begin();
1349 }
1350 EXPORT_SYMBOL_GPL(kthread_use_mm);
1351
1352 /**
1353 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1354 * @mm: address space to operate on
1355 */
1356 void kthread_unuse_mm(struct mm_struct *mm)
1357 {
1358 struct task_struct *tsk = current;
1359
1360 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1361 WARN_ON_ONCE(!tsk->mm);
1362
1363 force_uaccess_end(to_kthread(tsk)->oldfs);
1364
1365 task_lock(tsk);
1366 /*
1367 * When a kthread stops operating on an address space, the loop
1368 * in membarrier_{private,global}_expedited() may not observe
1369 * that tsk->mm, and not issue an IPI. Membarrier requires a
1370 * memory barrier after accessing user-space memory, before
1371 * clearing tsk->mm.
1372 */
1373 smp_mb__after_spinlock();
1374 sync_mm_rss(mm);
1375 local_irq_disable();
1376 tsk->mm = NULL;
1377 membarrier_update_current_mm(NULL);
1378 /* active_mm is still 'mm' */
1379 enter_lazy_tlb(mm, tsk);
1380 local_irq_enable();
1381 task_unlock(tsk);
1382 }
1383 EXPORT_SYMBOL_GPL(kthread_unuse_mm);
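/*
 * Illustrative sketch, not part of this file: how a kthread temporarily
 * adopts a user address space to touch user memory, as vhost-style workers
 * do. example_mm and example_uptr are hypothetical; every kthread_use_mm()
 * must be paired with kthread_unuse_mm().
 */
#if 0	/* example only */
	int val, err = 0;

	kthread_use_mm(example_mm);
	if (copy_from_user(&val, example_uptr, sizeof(val)))
		err = -EFAULT;
	kthread_unuse_mm(example_mm);
#endif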
1384
1385 #ifdef CONFIG_BLK_CGROUP
1386 /**
1387 * kthread_associate_blkcg - associate blkcg to current kthread
1388 * @css: the cgroup info
1389 *
1390 * Current thread must be a kthread. The thread is running jobs on behalf of
1391 * other threads. In some cases, we expect the jobs to attach the cgroup info
1392 * of the original threads instead of that of the current thread. This function
1393 * stores the original thread's cgroup info in the current kthread context for
1394 * later retrieval.
1395 */
1396 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1397 {
1398 struct kthread *kthread;
1399
1400 if (!(current->flags & PF_KTHREAD))
1401 return;
1402 kthread = to_kthread(current);
1403 if (!kthread)
1404 return;
1405
1406 if (kthread->blkcg_css) {
1407 css_put(kthread->blkcg_css);
1408 kthread->blkcg_css = NULL;
1409 }
1410 if (css) {
1411 css_get(css);
1412 kthread->blkcg_css = css;
1413 }
1414 }
1415 EXPORT_SYMBOL(kthread_associate_blkcg);
1416
1417 /**
1418 * kthread_blkcg - get associated blkcg css of current kthread
1419 *
1420 * Current thread must be a kthread.
1421 */
1422 struct cgroup_subsys_state *kthread_blkcg(void)
1423 {
1424 struct kthread *kthread;
1425
1426 if (current->flags & PF_KTHREAD) {
1427 kthread = to_kthread(current);
1428 if (kthread)
1429 return kthread->blkcg_css;
1430 }
1431 return NULL;
1432 }
1433 EXPORT_SYMBOL(kthread_blkcg);
1434 #endif