// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

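/*
 * Set the counter to @amount: zero each CPU's local delta and store @amount
 * in the global count, all under fbc->lock.
 */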
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
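/*
 * For illustration: with batch = 32, repeated percpu_counter_add_batch(fbc,
 * 1, 32) calls on one CPU only touch the local s32 delta until that delta
 * would reach 32; at that point the whole local value (including the new
 * amount) is folded into fbc->count under the lock and the local delta is
 * reset to zero.
 */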
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
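/*
 * Only online CPUs are walked here: when a CPU goes offline, its local delta
 * is folded into fbc->count by percpu_counter_cpu_dead(), so no updates are
 * lost by skipping offline CPUs.
 */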
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

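/*
 * Initialise @fbc: set the global count to @amount and allocate the per-cpu
 * deltas with @gfp.  Returns -ENOMEM if the per-cpu allocation fails.  With
 * CONFIG_HOTPLUG_CPU the counter is also added to the global list so that
 * percpu_counter_cpu_dead() can find it.
 */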
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

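/*
 * Tear down @fbc: unlink it from the hotplug list (if configured) and free
 * the per-cpu deltas.  Safe to call on a counter whose init failed, since a
 * NULL fbc->counters is checked first.
 */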
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

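/*
 * Scale the default batch with the number of online CPUs: nr * 2, but never
 * below 32.  For illustration, 4 online CPUs keep the batch at 32, while 64
 * online CPUs raise it to 128.
 */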
static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

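/*
 * Hotplug "dead" callback: recompute the batch value for the reduced number
 * of online CPUs, then fold the departing CPU's local delta of every
 * registered counter into its global count and reset that delta to zero.
 */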
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
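/*
 * The cheap percpu_counter_read() can be off by roughly
 * batch * num_online_cpus(), since each CPU typically holds less than one
 * batch worth of uncommitted delta.  If the rough count differs from @rhs by
 * more than that, it is already decisive; otherwise fall back to the exact
 * (and slower) percpu_counter_sum().
 */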
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);

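/*
 * Register the CPU hotplug callbacks: recompute the batch value whenever a
 * CPU comes online, and fold a dead CPU's deltas back into the global counts.
 */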
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);