// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
        struct work_struct	pw_work;
        struct list_head	pw_list;  /* padata_free_works linkage */
        void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
        spinlock_t		lock;
        struct completion	completion;
        struct padata_mt_job	*job;
        int			nworks;
        int			nworks_fini;
        unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
        int cpu, target_cpu;

        target_cpu = cpumask_first(pd->cpumask.pcpu);
        for (cpu = 0; cpu < cpu_index; cpu++)
                target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

        return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
        /*
         * Hash the sequence numbers to the cpus by taking
         * seq_nr modulo the number of cpus in use.
         */
        int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

        return padata_index_to_cpu(pd, cpu_index);
}
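
/*
 * For example, with pd->cpumask.pcpu = {0, 2, 5} (weight 3), an object
 * with seq_nr = 4 gets cpu_index = 4 % 3 = 1 and so hashes to the
 * second CPU in the mask, CPU 2.
 */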

static struct padata_work *padata_work_alloc(void)
{
        struct padata_work *pw;

        lockdep_assert_held(&padata_works_lock);

        if (list_empty(&padata_free_works))
                return NULL;	/* No more work items allowed to be queued. */

        pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
        list_del(&pw->pw_list);
        return pw;
}

static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
                             void *data, int flags)
{
        if (flags & PADATA_WORK_ONSTACK)
                INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
        else
                INIT_WORK(&pw->pw_work, work_fn);
        pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
                                       struct list_head *head)
{
        int i;

        spin_lock(&padata_works_lock);
        /* Start at 1 because the current task participates in the job. */
        for (i = 1; i < nworks; ++i) {
                struct padata_work *pw = padata_work_alloc();

                if (!pw)
                        break;
                padata_work_init(pw, padata_mt_helper, data, 0);
                list_add(&pw->pw_list, head);
        }
        spin_unlock(&padata_works_lock);

        return i;
}

static void padata_work_free(struct padata_work *pw)
{
        lockdep_assert_held(&padata_works_lock);
        list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
        struct padata_work *cur, *next;

        if (list_empty(works))
                return;

        spin_lock(&padata_works_lock);
        list_for_each_entry_safe(cur, next, works, pw_list) {
                list_del(&cur->pw_list);
                padata_work_free(cur);
        }
        spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
        struct padata_work *pw = container_of(parallel_work, struct padata_work,
                                              pw_work);
        struct padata_priv *padata = pw->pw_data;

        local_bh_disable();
        padata->parallel(padata);
        spin_lock(&padata_works_lock);
        padata_work_free(pw);
        spin_unlock(&padata_works_lock);
        local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
                       struct padata_priv *padata, int *cb_cpu)
{
        struct padata_instance *pinst = ps->pinst;
        int i, cpu, cpu_index, err;
        struct parallel_data *pd;
        struct padata_work *pw;

        rcu_read_lock_bh();

        pd = rcu_dereference_bh(ps->pd);

        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
                goto out;

        if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
                if (!cpumask_weight(pd->cpumask.cbcpu))
                        goto out;

                /* Select an alternate fallback CPU and notify the caller. */
                cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

                cpu = cpumask_first(pd->cpumask.cbcpu);
                for (i = 0; i < cpu_index; i++)
                        cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

                *cb_cpu = cpu;
        }

        err = -EBUSY;
        if ((pinst->flags & PADATA_RESET))
                goto out;

        atomic_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = *cb_cpu;

        spin_lock(&padata_works_lock);
        padata->seq_nr = ++pd->seq_nr;
        pw = padata_work_alloc();
        spin_unlock(&padata_works_lock);

        rcu_read_unlock_bh();

        if (pw) {
                padata_work_init(pw, padata_parallel_worker, padata, 0);
                queue_work(pinst->parallel_wq, &pw->pw_work);
        } else {
                /* Maximum works limit exceeded, run in the current task. */
                padata->parallel(padata);
        }

        return 0;
out:
        rcu_read_unlock_bh();

        return err;
}
EXPORT_SYMBOL(padata_do_parallel);
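
/*
 * Example usage (an illustrative sketch, not code from this file): a
 * hypothetical caller embeds struct padata_priv in its own request,
 * sets the parallel and serial callbacks, and submits through a shell.
 * The names my_request, my_parallel, my_serial and process() are made
 * up for the example.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		// caller-private data ...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		process(req);			// the expensive, parallel part
 *		padata_do_serial(padata);	// mandatory for every object
 *	}
 *
 *	static int my_submit(struct padata_shell *ps, struct my_request *req,
 *			     int cb_cpu)
 *	{
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;	// see the sketch further below
 *		return padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	}
 */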

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
                                            bool remove_object)
{
        struct padata_priv *padata;
        struct padata_list *reorder;
        int cpu = pd->cpu;

        reorder = per_cpu_ptr(pd->reorder_list, cpu);

        spin_lock(&reorder->lock);
        if (list_empty(&reorder->list)) {
                spin_unlock(&reorder->lock);
                return NULL;
        }

        padata = list_entry(reorder->list.next, struct padata_priv, list);

        /*
         * Check for the rare case where two or more parallel jobs have hashed
         * to the same CPU and one of the later ones finishes first.
         */
        if (padata->seq_nr != pd->processed) {
                spin_unlock(&reorder->lock);
                return NULL;
        }

        if (remove_object) {
                list_del_init(&padata->list);
                ++pd->processed;
                pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
        }

        spin_unlock(&reorder->lock);
        return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
        struct padata_instance *pinst = pd->ps->pinst;
        int cb_cpu;
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
        struct padata_list *reorder;

        /*
         * We need to ensure that only one cpu can work on dequeueing of
         * the reorder queue at a time.  Calculating in which percpu reorder
         * queue the next object will arrive takes some time.  A spinlock
         * would be highly contended.  Also it is not clear in which order
         * the objects arrive at the reorder queues.  So a cpu could wait to
         * get the lock just to notice that there is nothing to do at the
         * moment.  Therefore we use a trylock and let the holder of the lock
         * care for all the objects enqueued during the holdtime of the lock.
         */
        if (!spin_trylock_bh(&pd->lock))
                return;

        while (1) {
                padata = padata_find_next(pd, true);

                /*
                 * If the next object that needs serialization is parallel
                 * processed by another cpu and is still on its way to the
                 * cpu's reorder queue, nothing to do for now.
                 */
                if (!padata)
                        break;

                cb_cpu = padata->cb_cpu;
                squeue = per_cpu_ptr(pd->squeue, cb_cpu);

                spin_lock(&squeue->serial.lock);
                list_add_tail(&padata->list, &squeue->serial.list);
                spin_unlock(&squeue->serial.lock);

                queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
        }

        spin_unlock_bh(&pd->lock);

        /*
         * The next object that needs serialization might have arrived at
         * the reorder queues in the meantime.
         *
         * Ensure the reorder queue is read after pd->lock is dropped so we
         * see new objects from another task in padata_do_serial.  Pairs with
         * smp_mb in padata_do_serial.
         */
        smp_mb();

        reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
        if (!list_empty(&reorder->list) && padata_find_next(pd, false))
                queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
        struct parallel_data *pd;

        local_bh_disable();
        pd = container_of(work, struct parallel_data, reorder_work);
        padata_reorder(pd);
        local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
        struct padata_serial_queue *squeue;
        struct parallel_data *pd;
        LIST_HEAD(local_list);
        int cnt;

        local_bh_disable();
        squeue = container_of(serial_work, struct padata_serial_queue, work);
        pd = squeue->pd;

        spin_lock(&squeue->serial.lock);
        list_replace_init(&squeue->serial.list, &local_list);
        spin_unlock(&squeue->serial.lock);

        cnt = 0;

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->serial(padata);
                cnt++;
        }
        local_bh_enable();

        if (atomic_sub_and_test(cnt, &pd->refcnt))
                padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
        struct parallel_data *pd = padata->pd;
        int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
        struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
        struct padata_priv *cur;

        spin_lock(&reorder->lock);
        /* Sort in ascending order of sequence number. */
        list_for_each_entry_reverse(cur, &reorder->list, list)
                if (cur->seq_nr < padata->seq_nr)
                        break;
        list_add(&padata->list, &cur->list);
        spin_unlock(&reorder->lock);

        /*
         * Ensure the addition to the reorder list is ordered correctly
         * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
         * in padata_reorder.
         */
        smp_mb();

        padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
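
/*
 * Continuing the sketch after padata_do_parallel above: once each object
 * passes through padata_do_serial, the hypothetical my_serial callback
 * runs on the chosen cb_cpu in the original submission order.
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		// objects arrive here in the order they were submitted
 *		complete_request(req);	// hypothetical completion step
 *	}
 */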

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
        struct workqueue_attrs *attrs;
        int err;

        attrs = alloc_workqueue_attrs();
        if (!attrs)
                return -ENOMEM;

        /* Restrict parallel_wq workers to pinst->cpumask.pcpu. */
        cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
        err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
        free_workqueue_attrs(attrs);

        return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
        struct padata_work *pw = container_of(w, struct padata_work, pw_work);
        struct padata_mt_job_state *ps = pw->pw_data;
        struct padata_mt_job *job = ps->job;
        bool done;

        spin_lock(&ps->lock);

        while (job->size > 0) {
                unsigned long start, size, end;

                start = job->start;
                /* So end is chunk size aligned if enough work remains. */
                size = roundup(start + 1, ps->chunk_size) - start;
                size = min(size, job->size);
                end = start + size;

                job->start = end;
                job->size -= size;

                spin_unlock(&ps->lock);
                job->thread_fn(start, end, job->fn_arg);
                spin_lock(&ps->lock);
        }

        ++ps->nworks_fini;
        done = (ps->nworks_fini == ps->nworks);
        spin_unlock(&ps->lock);

        if (done)
                complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
        /* In case threads finish at different times. */
        static const unsigned long load_balance_factor = 4;
        struct padata_work my_work, *pw;
        struct padata_mt_job_state ps;
        LIST_HEAD(works);
        int nworks;

        if (job->size == 0)
                return;

        /* Ensure at least one thread when size < min_chunk. */
        nworks = max(job->size / job->min_chunk, 1ul);
        nworks = min(nworks, job->max_threads);

        if (nworks == 1) {
                /* Single thread, no coordination needed, cut to the chase. */
                job->thread_fn(job->start, job->start + job->size, job->fn_arg);
                return;
        }

        spin_lock_init(&ps.lock);
        init_completion(&ps.completion);
        ps.job = job;
        ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
        ps.nworks_fini = 0;

        /*
         * Chunk size is the amount of work a helper does per call to the
         * thread function.  Load balance large jobs between threads by
         * increasing the number of chunks, guarantee at least the minimum
         * chunk size from the caller, and honor the caller's alignment.
         */
        ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
        ps.chunk_size = max(ps.chunk_size, job->min_chunk);
        ps.chunk_size = roundup(ps.chunk_size, job->align);
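
        /*
         * Worked example: size = 1 << 20, min_chunk = 4096, align = 64 and
         * nworks = 4 give chunk_size = (1 << 20) / 16 = 65536, so the job
         * splits into sixteen chunks and a straggling thread delays the job
         * by at most roughly 1/16 of the total work.
         */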

        list_for_each_entry(pw, &works, pw_list)
                queue_work(system_unbound_wq, &pw->pw_work);

        /* Use the current thread, which saves starting a workqueue worker. */
        padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
        padata_mt_helper(&my_work.pw_work);

        /* Wait for all the helpers to finish. */
        wait_for_completion(&ps.completion);

        destroy_work_on_stack(&my_work.pw_work);
        padata_works_free(&works);
}
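
/*
 * Example usage (an illustrative sketch; the thread function, range and
 * tuning values are hypothetical): split the initialization of a range
 * of items across CPUs during boot.
 *
 *	static void __init my_thread_fn(unsigned long start, unsigned long end,
 *					void *arg)
 *	{
 *		// initialize items in [start, end)
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = my_thread_fn,
 *		.fn_arg      = NULL,
 *		.start       = 0,
 *		.size        = nr_items,
 *		.align       = 1,
 *		.min_chunk   = 1024,
 *		.max_threads = num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);
 */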

static void __padata_list_init(struct padata_list *pd_list)
{
        INIT_LIST_HEAD(&pd_list->list);
        spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
        int cpu;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                squeue->pd = pd;
                __padata_list_init(&squeue->serial);
                INIT_WORK(&squeue->work, padata_serial_worker);
        }
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
        int cpu;
        struct padata_list *list;

        for_each_cpu(cpu, pd->cpumask.pcpu) {
                list = per_cpu_ptr(pd->reorder_list, cpu);
                __padata_list_init(list);
        }
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
        struct padata_instance *pinst = ps->pinst;
        struct parallel_data *pd;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->reorder_list = alloc_percpu(struct padata_list);
        if (!pd->reorder_list)
                goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
                goto err_free_reorder_list;

        pd->ps = ps;

        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                goto err_free_squeue;
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
                goto err_free_pcpu;

        cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
        cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

        padata_init_reorder_list(pd);
        padata_init_squeues(pd);
        pd->seq_nr = -1;
        atomic_set(&pd->refcnt, 1);
        spin_lock_init(&pd->lock);
        pd->cpu = cpumask_first(pd->cpumask.pcpu);
        INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

        return pd;

err_free_pcpu:
        free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
        free_percpu(pd->squeue);
err_free_reorder_list:
        free_percpu(pd->reorder_list);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
        free_cpumask_var(pd->cpumask.pcpu);
        free_cpumask_var(pd->cpumask.cbcpu);
        free_percpu(pd->reorder_list);
        free_percpu(pd->squeue);
        kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
        pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
        if (!(pinst->flags & PADATA_INIT))
                return;

        pinst->flags &= ~PADATA_INIT;

        synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
        struct parallel_data *pd_new;

        pd_new = padata_alloc_pd(ps);
        if (!pd_new)
                return -ENOMEM;

        ps->opd = rcu_dereference_protected(ps->pd, 1);
        rcu_assign_pointer(ps->pd, pd_new);

        return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
        struct padata_shell *ps;
        int err = 0;

        pinst->flags |= PADATA_RESET;

        list_for_each_entry(ps, &pinst->pslist, list) {
                err = padata_replace_one(ps);
                if (err)
                        break;
        }

        synchronize_rcu();

        list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
                if (atomic_dec_and_test(&ps->opd->refcnt))
                        padata_free_pd(ps->opd);

        pinst->flags &= ~PADATA_RESET;

        return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }

        pinst->flags &= ~PADATA_INVALID;
        return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t pcpumask,
                                 cpumask_var_t cbcpumask)
{
        int valid;
        int err;

        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
                __padata_stop(pinst);
                goto out_replace;
        }

        valid = padata_validate_cpumask(pinst, cbcpumask);
        if (!valid)
                __padata_stop(pinst);

out_replace:
        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

        if (valid)
                __padata_start(pinst);

        return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                       cpumask_var_t cpumask)
{
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;

        get_online_cpus();
        mutex_lock(&pinst->lock);

        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
                serial_mask = pinst->cpumask.cbcpu;
                parallel_mask = cpumask;
                break;
        case PADATA_CPU_SERIAL:
                parallel_mask = pinst->cpumask.pcpu;
                serial_mask = cpumask;
                break;
        default:
                goto out;
        }

        err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
        mutex_unlock(&pinst->lock);
        put_online_cpus();

        return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
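
/*
 * Example usage (an illustrative sketch): restrict the parallel workers
 * of a hypothetical instance pinst to CPUs 0-3.
 *
 *	cpumask_var_t mask;
 *	int cpu, err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	for (cpu = 0; cpu < 4; cpu++)
 *		cpumask_set_cpu(cpu, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */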

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
        int err = 0;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                err = padata_replace(pinst);

                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }

        return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
        int err = 0;

        if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);

                err = padata_replace(pinst);
        }

        return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
        return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
               cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_add_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_remove_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
                                            &pinst->cpu_dead_node);
        cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

        WARN_ON(!list_empty(&pinst->pslist));

        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        destroy_workqueue(pinst->serial_wq);
        destroy_workqueue(pinst->parallel_wq);
        kfree(pinst);
}

#define kobj2pinst(_kobj)					\
        container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
        container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
        struct padata_instance *pinst = kobj2pinst(kobj);
        __padata_free(pinst);
}

struct padata_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
        ssize_t (*store)(struct padata_instance *, struct attribute *,
                         const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
                            struct attribute *attr, char *buf)
{
        struct cpumask *cpumask;
        ssize_t len;

        mutex_lock(&pinst->lock);
        if (!strcmp(attr->name, "serial_cpumask"))
                cpumask = pinst->cpumask.cbcpu;
        else
                cpumask = pinst->cpumask.pcpu;

        len = snprintf(buf, PAGE_SIZE, "%*pb\n",
                       nr_cpu_ids, cpumask_bits(cpumask));
        mutex_unlock(&pinst->lock);
        return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
                             struct attribute *attr,
                             const char *buf, size_t count)
{
        cpumask_var_t new_cpumask;
        ssize_t ret;
        int mask_type;

        if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
                return -ENOMEM;

        ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
                           nr_cpumask_bits);
        if (ret < 0)
                goto out;

        mask_type = !strcmp(attr->name, "serial_cpumask") ?
                PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
        ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
        if (!ret)
                ret = count;

out:
        free_cpumask_var(new_cpumask);
        return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
        static struct padata_sysfs_entry _name##_attr =		\
                __ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
        static struct padata_sysfs_entry _name##_attr = \
                __ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
        &serial_cpumask_attr.attr,
        &parallel_cpumask_attr.attr,
        NULL,
};
ATTRIBUTE_GROUPS(padata_default);
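
/*
 * For instances whose kobject has been added under /sys (pcrypt, for
 * example, exposes its instances under /sys/kernel/pcrypt/), the masks
 * can be read and written as hex bitmaps from user space, e.g.:
 *
 *	echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * The path above is the pcrypt case; other users may place the kobject
 * elsewhere.
 */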

static ssize_t padata_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->show)
                ret = pentry->show(pinst, attr, buf);

        return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->store)
                ret = pentry->store(pinst, attr, buf, count);

        return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
        .show = padata_sysfs_show,
        .store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_groups = padata_default_groups,
        .release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
        struct padata_instance *pinst;

        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
                goto err;

        pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
                                             name);
        if (!pinst->parallel_wq)
                goto err_free_inst;

        get_online_cpus();

        pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
                                           WQ_CPU_INTENSIVE, 1, name);
        if (!pinst->serial_wq)
                goto err_put_cpus;

        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
                goto err_free_serial_wq;
        if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pinst->cpumask.pcpu);
                goto err_free_serial_wq;
        }

        INIT_LIST_HEAD(&pinst->pslist);

        cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
        cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

        if (padata_setup_cpumasks(pinst))
                goto err_free_masks;

        __padata_start(pinst);

        kobject_init(&pinst->kobj, &padata_attr_type);
        mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
                                                    &pinst->cpu_online_node);
        cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
                                                    &pinst->cpu_dead_node);
#endif

        put_online_cpus();

        return pinst;

err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
        destroy_workqueue(pinst->serial_wq);
err_put_cpus:
        put_online_cpus();
        destroy_workqueue(pinst->parallel_wq);
err_free_inst:
        kfree(pinst);
err:
        return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
        kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
        struct parallel_data *pd;
        struct padata_shell *ps;

        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                goto out;

        ps->pinst = pinst;

        get_online_cpus();
        pd = padata_alloc_pd(ps);
        put_online_cpus();

        if (!pd)
                goto out_free_ps;

        mutex_lock(&pinst->lock);
        RCU_INIT_POINTER(ps->pd, pd);
        list_add(&ps->list, &pinst->pslist);
        mutex_unlock(&pinst->lock);

        return ps;

out_free_ps:
        kfree(ps);
out:
        return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
        if (!ps)
                return;

        mutex_lock(&ps->pinst->lock);
        list_del(&ps->list);
        padata_free_pd(rcu_dereference_protected(ps->pd, 1));
        mutex_unlock(&ps->pinst->lock);

        kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);
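
/*
 * Example usage (an illustrative sketch): the typical lifecycle for a
 * hypothetical user named "myuser".
 *
 *	struct padata_instance *pinst = padata_alloc("myuser");
 *	struct padata_shell *ps;
 *
 *	if (!pinst)
 *		return -ENOMEM;
 *	ps = padata_alloc_shell(pinst);
 *	if (!ps) {
 *		padata_free(pinst);
 *		return -ENOMEM;
 *	}
 *
 *	// ... submit objects with padata_do_parallel(ps, ...) ...
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */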

void __init padata_init(void)
{
        unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
                                      padata_cpu_online, NULL);
        if (ret < 0)
                goto err;
        hp_online = ret;

        ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
                                      NULL, padata_cpu_dead);
        if (ret < 0)
                goto remove_online_state;
#endif

        possible_cpus = num_possible_cpus();
        padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
                                     GFP_KERNEL);
        if (!padata_works)
                goto remove_dead_state;

        for (i = 0; i < possible_cpus; ++i)
                list_add(&padata_works[i].pw_list, &padata_free_works);

        return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
        cpuhp_remove_multi_state(hp_online);
err:
#endif
        pr_warn("padata: initialization failed\n");
}