/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

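/*
 * One cached (flow key -> resolved object) binding.  An entry lives either
 * on its per-CPU hash chain (u.hlist) or, once evicted, on the global
 * garbage list (u.gc_list), never on both at once, hence the union.
 * genid records the value of flow_cache_genid at resolve time so stale
 * entries can be detected lazily.
 */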
struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

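/*
 * Per-CPU half of the cache.  Each CPU owns a private hash table that is
 * only touched with bottom halves disabled on that CPU, so the fast path
 * needs no locking.  hash_rnd seeds the jhash and is reseeded periodically
 * (see flow_cache_new_hashrnd()).
 */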
struct flow_cache_percpu {
	struct hlist_head		*hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

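/*
 * Bookkeeping for one flow_cache_flush() run: cpuleft counts the CPUs that
 * still have to run their flush tasklet; the last one to finish signals
 * the completion.
 */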
struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

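/*
 * Top-level cache descriptor.  Only one instance exists (flow_cache_global
 * below): hash_shift fixes the per-CPU table size, the watermarks bound the
 * number of entries per CPU, and rnd_timer periodically forces the per-CPU
 * hash seeds to be recomputed.
 */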
struct flow_cache {
	u32				hash_shift;
	struct flow_cache_percpu __percpu *percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

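/*
 * Bumping flow_cache_genid invalidates every cached entry lazily: an entry
 * whose genid no longer matches fails flow_entry_valid() and is dropped or
 * re-resolved the next time it is looked at.
 */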
atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

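/*
 * Timer callback: every FLOW_HASH_RND_PERIOD (10 minutes) mark each CPU's
 * hash seed as stale.  The actual reseed happens lazily in
 * flow_cache_lookup() via flow_new_hash_rnd(), which also empties that
 * CPU's table since the old chain positions no longer match the new hash.
 */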
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

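/*
 * An entry is stale once the global generation counter has moved on or the
 * cached object's own ->check() rejects it.  flow_entry_kill() drops the
 * object via ->delete() and frees the entry; it runs from the GC work item
 * in process context.
 */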
static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

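/*
 * Eviction happens in softirq context (lookup, shrink and flush paths), but
 * freeing is deferred: dead entries are spliced onto flow_cache_gc_list
 * under flow_cache_gc_lock and destroyed later by this work item.
 */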
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&flow_cache_gc_lock);
	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&flow_cache_gc_lock);
		list_splice_tail(gc_list, &flow_cache_gc_list);
		spin_unlock_bh(&flow_cache_gc_lock);
		schedule_work(&flow_cache_gc_work);
	}
}

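/*
 * Walk one CPU's table and keep at most shrink_to still-valid entries per
 * chain; everything else is queued for garbage collection.  shrink_to == 0
 * empties the table (used on reseed and when a CPU goes away).
 */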
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

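/*
 * Hash the first keysize words of the flowi with the per-CPU random seed
 * and fold the result into the table size.  keysize is measured in units
 * of flow_compare_t, as returned by flow_key_size() for the family.
 */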
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

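/*
 * Look up (or resolve) the object cached for a flow key.  Runs with bottom
 * halves disabled, so only the local CPU's table is touched and no lock is
 * taken.  On a miss, or when the cached entry is stale, the caller-supplied
 * resolver is invoked to produce a fresh flow_cache_object, which is then
 * stored under the current generation id; resolver errors leave the entry
 * marked stale so the next lookup retries.
 *
 * An illustrative call (the resolver and ctx names here are hypothetical,
 * actual callers live outside this file):
 *
 *	flo = flow_cache_lookup(net, &fl, family, dir, my_resolver, ctx);
 *	if (IS_ERR(flo))
 *		return PTR_ERR(flo);
 */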
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct hlist_node *entry;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

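/*
 * Per-CPU half of a flush: scan the local table, queue every entry that is
 * no longer valid for garbage collection, and wake the flusher once the
 * last CPU has finished.
 */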
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *entry, *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, entry, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

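/*
 * Flush the cache on every CPU and wait for completion.  The flush mutex
 * serialises concurrent flushers, get_online_cpus() keeps the CPU set
 * stable, and the work is fanned out as one tasklet per remote CPU while
 * the local CPU runs its tasklet body directly.
 */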
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

static void flow_cache_flush_task(struct work_struct *work)
{
	flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
	schedule_work(&flow_cache_flush_work);
}

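/*
 * Allocate a CPU's hash table (on that CPU's memory node) the first time
 * the CPU is brought up, or at init time for CPUs that are already online.
 */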
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

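/*
 * CPU hotplug notifier: allocate the table before a CPU comes up, and empty
 * it (through the normal GC path) once a CPU is dead.
 */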
static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

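/*
 * Set up one flow cache: 1024 buckets per CPU (hash_shift = 10), low/high
 * watermarks at 2x/4x the bucket count, per-CPU state, the hotplug notifier
 * and the periodic reseed timer.
 */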
static int __init flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);