/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for an api documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}
static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */

	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
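
/*
 * Worked example (illustrative only, not part of the API): with
 * cpumask.pcpu = {1, 4, 6}, cpumask_weight() is 3. An object with
 * seq_nr = 7 gets cpu_index = 7 % 3 = 1, and padata_index_to_cpu()
 * walks the mask once from cpu 1 to return cpu 4. Submission order
 * thus maps round-robin onto the parallel cpumask.
 */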
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of @pinst (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
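
/*
 * Usage sketch (hypothetical client code, not part of this file): the
 * caller embeds struct padata_priv in its own request and submits it
 * from a softirq-safe context. my_request, my_parallel, my_serial and
 * do_expensive_work are made-up names; my_serial is shown in the
 * completion sketch after padata_do_serial() below.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		void *payload;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *					struct my_request, padata);
 *
 *		do_expensive_work(req->payload);
 *		padata_do_serial(padata);	(mandatory for every object)
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 */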
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);
}
static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}
static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
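
/*
 * Completion sketch (hypothetical, continuing the example above the
 * padata_do_parallel definition): the serial callback runs on the
 * cb_cpu passed to padata_do_parallel(), and objects are delivered in
 * submission order even if another parallel worker finished first.
 * complete_my_request is a made-up completion hook.
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *					struct my_request, padata);
 *
 *		complete_my_request(req);
 *	}
 */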
static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		/* cbcpu allocation failed, so it is pcpu that must be freed */
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}
/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}
/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}
/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pinst->cpumask);

	pinst->flags &= ~PADATA_RESET;
}
/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either pcpu or cbcpu or both cpumasks
 *                                    change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);
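
/*
 * Registration sketch (hypothetical caller): my_cpumask_change, my_nb
 * and resize_my_percpu_state are made-up names. The chain is invoked
 * from padata_replace() with the PADATA_CPU_* bits in @val and a
 * pointer to the instance's struct padata_cpumask in @data.
 *
 *	static int my_cpumask_change(struct notifier_block *nb,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			resize_my_percpu_state(new_masks->pcpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nb);
 */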
/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}
static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}
/**
 * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value
 *                     equivalent to @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or serial cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
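
/*
 * Example (hypothetical caller): restrict the parallel workers to
 * cpus 0-3, leaving the serial cpumask untouched.
 *
 *	cpumask_var_t mask;
 *	int cpu, err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	for (cpu = 0; cpu < 4; cpu++)
 *		cpumask_set_cpu(cpu, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */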
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);
/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}
/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}
static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}
#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}
static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};
/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
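
/*
 * Lifecycle sketch (hypothetical caller, error handling abbreviated):
 * "my_padata" is a made-up workqueue name; the flags are one plausible
 * choice, not a requirement.
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_padata", WQ_CPU_INTENSIVE, 0);
 *	pinst = padata_alloc_possible(wq);
 *	padata_start(pinst);
 *
 *	(submit objects with padata_do_parallel() while running)
 *
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */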
/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
#endif
	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online,
				      padata_cpu_prep_down);
	if (ret < 0)
		return ret;

	hp_online = ret;
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif