/*
 * padata.c - generic interface to process data streams in parallel
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define MAX_SEQ_NR (INT_MAX - NR_CPUS)
#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}
static int padata_cpu_hash(struct padata_priv *padata)
{
	int cpu_index;
	struct parallel_data *pd;

	pd = padata->pd;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
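/*
 * Worked example of the hash above (illustrative numbers): with
 * cpumask.pcpu = {0, 2, 5}, cpumask_weight() is 3, so an object with
 * seq_nr 7 gets cpu_index = 7 % 3 = 1, and padata_index_to_cpu()
 * walks the mask to its second set bit, cpu 2.
 */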
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT))
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
		atomic_set(&pd->seq_nr, -1);

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);

	target_cpu = padata_cpu_hash(padata);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
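/*
 * Usage sketch (compiled out, not part of padata itself; all
 * "example_*" names are hypothetical): a user embeds struct padata_priv
 * into its own request structure, sets the parallel/serial callbacks
 * and submits the request with padata_do_parallel().
 */
#if 0
struct example_request {
	struct padata_priv padata;	/* embedded control structure */
	int data;
};

static void example_parallel(struct padata_priv *padata)
{
	struct example_request *req = container_of(padata,
					struct example_request, padata);

	req->data *= 2;	/* the expensive, parallelizable work */

	/* every parallelized object must reach padata_do_serial() */
	padata_do_serial(padata);
}

static void example_serial(struct padata_priv *padata)
{
	/* runs on cb_cpu, in original submission order, with BHs off */
}

static int example_submit(struct padata_instance *pinst,
			  struct example_request *req, int cb_cpu)
{
	req->padata.parallel = example_parallel;
	req->padata.serial = example_serial;

	return padata_do_parallel(pinst, &req->padata, cb_cpu);
}
#endif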
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	int next_nr, next_index;
	struct padata_parallel_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	if (unlikely(next_nr > pd->max_seq_nr)) {
		next_nr = next_nr - pd->max_seq_nr - 1;
		next_index = next_nr % num_cpus;
		cpu = padata_index_to_cpu(pd, next_index);
		next_queue = per_cpu_ptr(pd->pqueue, cpu);
		pd->processed = 0;
	}

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		BUG_ON(next_nr != padata->seq_nr);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
	if (queue->cpu_index == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		squeue = per_cpu_ptr(pd->squeue, padata->cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(padata->cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);
}
static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}
static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		/* free the already-allocated parallel mask on failure */
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
	return 0;
}
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}
/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, num_cpus, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}

	num_cpus = cpumask_weight(pd->cpumask.pcpu);
	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
}
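/*
 * Example of the rounding above (illustrative numbers): with
 * num_cpus = 3 and MAX_SEQ_NR = 20, max_seq_nr becomes
 * (20 / 3) * 3 - 1 = 17, i.e. the 18 values 0..17 are a whole multiple
 * of num_cpus. Sequence numbers therefore wrap at a round-robin
 * boundary and seq_nr % num_cpus keeps assigning consecutive objects
 * to consecutive cpus across the wraparound.
 */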
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}
/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}
/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!pd_old)
		goto out;

	padata_flush_queues(pd_old);
	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_free_pd(pd_old);
	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask, pinst);

out:
	pinst->flags &= ~PADATA_RESET;
}
/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);
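/*
 * Sketch of a cpumask change notifier (compiled out; the "example_*"
 * names are hypothetical): the callback runs from a blocking notifier
 * chain, val is a combination of PADATA_CPU_PARALLEL and
 * PADATA_CPU_SERIAL, and data is the padata instance as passed by
 * padata_replace() above.
 */
#if 0
static int example_cpumask_change(struct notifier_block *self,
				  unsigned long val, void *data)
{
	struct padata_instance *pinst = data;

	if (val & PADATA_CPU_PARALLEL)
		pr_debug("parallel cpumask of %p changed\n", pinst);
	if (val & PADATA_CPU_SERIAL)
		pr_debug("serial cpumask of %p changed\n", pinst);

	return NOTIFY_OK;
}

static struct notifier_block example_nblock = {
	.notifier_call = example_cpumask_change,
};

/* registered with: padata_register_cpumask_notifier(pinst, &example_nblock); */
#endif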
/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *                  registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_active_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}
/**
 * padata_get_cpumask: Fetch serial or parallel cpumask from the
 *                     given padata instance and copy it to @out_mask
 *
 * @pinst: A pointer to padata instance
 * @cpumask_type: Specifies which cpumask will be copied.
 *                Possible values are PADATA_CPU_SERIAL *or* PADATA_CPU_PARALLEL
 *                corresponding to serial and parallel cpumask respectively.
 * @out_mask: A pointer to cpumask structure where selected
 *            cpumask will be copied.
 */
int padata_get_cpumask(struct padata_instance *pinst,
		       int cpumask_type, struct cpumask *out_mask)
{
	struct parallel_data *pd;
	int ret = 0;

	rcu_read_lock_bh();
	pd = rcu_dereference(pinst->pd);
	switch (cpumask_type) {
	case PADATA_CPU_SERIAL:
		cpumask_copy(out_mask, pd->cpumask.cbcpu);
		break;
	case PADATA_CPU_PARALLEL:
		cpumask_copy(out_mask, pd->cpumask.pcpu);
		break;
	default:
		ret = -EINVAL;
	}

	rcu_read_unlock_bh();
	return ret;
}
EXPORT_SYMBOL(padata_get_cpumask);
/**
 * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value
 *                     equivalent to @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
 *                to serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		return -EINVAL;
	}

	return __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
}
EXPORT_SYMBOL(padata_set_cpumask);
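/*
 * Usage sketch (compiled out; "example_*" is hypothetical, error
 * handling trimmed): restrict the parallel cpumask of a running
 * instance to cpus 0 and 1. The mask is copied internally, so the
 * caller may free it afterwards.
 */
#if 0
static int example_set_parallel_cpus(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);

	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}
#endif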
/**
 * __padata_set_cpumasks - Set both parallel and serial cpumasks. The first
 *                         one is used by parallel workers and the second one
 *                         by the workers doing serialization.
 *
 * @pinst: padata instance
 * @pcpumask: the cpumask to use for parallel workers
 * @cbcpumask: the cpumask to use for serial workers
 */
int __padata_set_cpumasks(struct padata_instance *pinst,
			  cpumask_var_t pcpumask, cpumask_var_t cbcpumask)
{
	int valid;
	int err = 0;
	struct parallel_data *pd = NULL;

	mutex_lock(&pinst->lock);

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	get_online_cpus();

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd) {
		err = -ENOMEM;
		goto out;
	}

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	put_online_cpus();
out:
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(__padata_set_cpumasks);
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}
/**
 * padata_add_cpu - add a cpu to one or both (parallel and serial)
 *                  cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_set_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_add_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu)) {
			__padata_stop(pinst);
			padata_replace(pinst, pd);
			goto out;
		}

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

out:
	return 0;
}
/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);
/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}
static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
	}

	return NOTIFY_OK;
}
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}
#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}
struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask),
			       nr_cpu_ids);
	if (PAGE_SIZE - len < 2)
		len = -EINVAL;
	else
		len += sprintf(buf + len, "\n");

	mutex_unlock(&pinst->lock);
	return len;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
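/*
 * These attributes become visible once the padata user adds the
 * instance's kobject to sysfs. pcrypt, for example, exposes its
 * instances under /sys/kernel/pcrypt/, so the parallel cpumask of its
 * "pencrypt" instance can be set from userspace with something like:
 *
 *   echo 3 > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */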
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}
static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}
static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};
/**
 * padata_alloc - Allocate and initialize padata instance.
 *                Use the default cpumask (cpu_possible_mask)
 *                for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq)
{
	return __padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc);
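/*
 * Lifecycle sketch (compiled out; "example_*" names are hypothetical,
 * error handling trimmed): allocate an instance on top of a dedicated
 * workqueue, start it, submit work via padata_do_parallel(), then tear
 * everything down again.
 */
#if 0
static struct workqueue_struct *example_wq;
static struct padata_instance *example_pinst;

static int example_init(void)
{
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	example_pinst = padata_alloc(example_wq);
	if (!example_pinst) {
		destroy_workqueue(example_wq);
		return -ENOMEM;
	}

	return padata_start(example_pinst);
}

static void example_exit(void)
{
	padata_stop(example_pinst);
	padata_free(example_pinst);
	destroy_workqueue(example_wq);
}
#endif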
/**
 * __padata_alloc - allocate and initialize a padata instance
 *                  and specify cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *__padata_alloc(struct workqueue_struct *wq,
				       const struct cpumask *pcpumask,
				       const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
EXPORT_SYMBOL(__padata_alloc);
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);