/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>

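/* One cached flow: a singly linked hash-chain node holding the flow
 * key, the resolved object, and the generation count at which the
 * object was resolved.  Entries live in per-CPU tables, so walking a
 * chain only requires bottom halves to be disabled on the local CPU.
 */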
struct flow_cache_entry {
	struct flow_cache_entry	*next;
	u16			family;
	u8			dir;
	u32			genid;
	struct flowi		key;
	void			*object;
	atomic_t		*object_ref;
};

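/* Global generation counter.  Nothing in this file increments it;
 * callers bump it whenever cached objects may have become invalid, and
 * lookups treat any entry whose genid does not match as stale and
 * re-resolve it.
 */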
atomic_t flow_cache_genid = ATOMIC_INIT(0);

static u32 flow_hash_shift;
#define flow_hash_size	(1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static struct kmem_cache *flow_cachep __read_mostly;

static int flow_lwm, flow_hwm;

struct flow_percpu_info {
	int hash_rnd_recalc;
	u32 hash_rnd;
	int count;
};
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
	(per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD	(10 * 60 * HZ)

struct flow_flush_info {
	atomic_t cpuleft;
	struct completion completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))

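/* Timer callback: flag every CPU to pick a fresh hash seed on its next
 * lookup, then re-arm the timer.  Periodic reseeding limits how long a
 * discovered hash keying stays useful to an attacker.
 */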
static void flow_cache_new_hashrnd(unsigned long arg)
{
	int i;

	for_each_possible_cpu(i)
		flow_hash_rnd_recalc(i) = 1;

	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);
}

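/* Free one entry, dropping its reference on the cached object. */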
static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
{
	if (fle->object)
		atomic_dec(fle->object_ref);
	kmem_cache_free(flow_cachep, fle);
	flow_count(cpu)--;
}

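/* Trim every hash chain on @cpu down to @shrink_to entries; a
 * shrink_to of zero empties the table completely.
 */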
static void __flow_cache_shrink(int cpu, int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_hash_size; i++) {
		int k = 0;

		flp = &flow_table(cpu)[i];
		while ((fle = *flp) != NULL && k < shrink_to) {
			k++;
			flp = &fle->next;
		}
		while ((fle = *flp) != NULL) {
			*flp = fle->next;
			flow_entry_kill(cpu, fle);
		}
	}
}

static void flow_cache_shrink(int cpu)
{
	int shrink_to = flow_lwm / flow_hash_size;

	__flow_cache_shrink(cpu, shrink_to);
}

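/* Install a new hash seed for this CPU.  The existing entries hash to
 * the wrong buckets under the new seed, so the table is emptied.
 */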
static void flow_new_hash_rnd(int cpu)
{
	get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
	flow_hash_rnd_recalc(cpu) = 0;

	__flow_cache_shrink(cpu, 0);
}

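/* Hash the flow key with this CPU's seed, masked to a table index. */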
static u32 flow_hash_code(struct flowi *key, int cpu)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
		(flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

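/* Look up @key in the local CPU's table.  A hit with a current genid
 * returns the cached object with an extra reference taken.  On a miss,
 * or when the entry's genid is stale, @resolver is called and its
 * result is cached.  Bottom halves are disabled for the duration so
 * nothing else can touch the per-CPU table.
 */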
void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
			flow_resolve_t resolver)
{
	struct flow_cache_entry *fle, **head;
	unsigned int hash;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();

	fle = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this. --RR */
	if (!flow_table(cpu))
		goto nocache;

	if (flow_hash_rnd_recalc(cpu))
		flow_new_hash_rnd(cpu);
	hash = flow_hash_code(key, cpu);

	head = &flow_table(cpu)[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0) {
			if (fle->genid == atomic_read(&flow_cache_genid)) {
				void *ret = fle->object;

				if (ret)
					atomic_inc(fle->object_ref);
				local_bh_enable();

				return ret;
			}
			break;
		}
	}

	if (!fle) {
		if (flow_count(cpu) > flow_hwm)
			flow_cache_shrink(cpu);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			flow_count(cpu)++;
		}
	}

nocache:
	{
		int err;
		void *obj;
		atomic_t *obj_ref;

		err = resolver(key, family, dir, &obj, &obj_ref);

		if (fle && !err) {
			fle->genid = atomic_read(&flow_cache_genid);

			if (fle->object)
				atomic_dec(fle->object_ref);

			fle->object = obj;
			fle->object_ref = obj_ref;
			if (obj)
				atomic_inc(fle->object_ref);
		}
		local_bh_enable();

		if (err)
			obj = ERR_PTR(err);
		return obj;
	}
}

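/* Per-CPU half of flow_cache_flush(): drop the object reference of
 * every entry in this CPU's table whose genid is stale, then signal
 * the completion once the last CPU has finished.
 */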
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < flow_hash_size; i++) {
		struct flow_cache_entry *fle;

		fle = flow_table(cpu)[i];
		for (; fle; fle = fle->next) {
			unsigned genid = atomic_read(&flow_cache_genid);

			if (!fle->object || fle->genid == genid)
				continue;

			fle->object = NULL;
			atomic_dec(fle->object_ref);
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

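/* Runs on each remote CPU via smp_call_function(); it only schedules
 * the local flush tasklet, deferring the actual table walk to softirq
 * context.  The extra __unused__ declaration keeps UP builds quiet,
 * where smp_call_function() compiles away and never references it.
 */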
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();

	tasklet = flow_flush_tasklet(cpu);
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

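/* Flush stale objects on every online CPU and wait until all of them
 * have finished.  The mutex serializes concurrent flushers, and
 * get_online_cpus() keeps the set of online CPUs stable throughout.
 */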
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

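/* Per-CPU setup: allocate the smallest page-order block that fits the
 * hash table, and initialize this CPU's hash-seed state and flush
 * tasklet.
 */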
static void __devinit flow_cache_cpu_prepare(int cpu)
{
	struct tasklet_struct *tasklet;
	unsigned long order;

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_hash_size);
	     order++)
		/* NOTHING */;

	flow_table(cpu) = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!flow_table(cpu))
		panic("NET: failed to allocate flow cache order %lu\n", order);

	flow_hash_rnd_recalc(cpu) = 1;
	flow_count(cpu) = 0;

	tasklet = flow_flush_tasklet(cpu);
	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}

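/* CPU hotplug notifier: when a CPU dies, empty its table so the
 * entries (and their object references) are not leaked.
 */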
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		__flow_cache_shrink((unsigned long)hcpu, 0);
	return NOTIFY_OK;
}

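/* Module init: create the entry slab, size the table at 1024 buckets
 * (shift 10) with low/high watermarks at 2x/4x that, arm the reseed
 * timer, and prepare every possible CPU.
 */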
static int __init flow_cache_init(void)
{
	int i;

	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC,
					NULL);
	flow_hash_shift = 10;
	flow_lwm = 2 * flow_hash_size;
	flow_hwm = 4 * flow_hash_size;

	setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(i);

	hotcpu_notifier(flow_cache_cpu, 0);
	return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);