/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <linux/security.h>

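/* One cache entry per (key, family, dir) tuple on a given CPU: a singly
 * linked hash-chain node holding the resolved object, a pointer to its
 * refcount, and the cache generation it was resolved under.
 */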
struct flow_cache_entry {
	struct flow_cache_entry	*next;
	u16			family;
	u8			dir;
	struct flowi		key;
	u32			genid;
	void			*object;
	atomic_t		*object_ref;
};

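/* Global generation counter, bumped by callers (e.g. the XFRM policy
 * layer) whenever cached objects must be invalidated.  Entries whose
 * genid no longer matches are treated as stale on lookup and flush.
 */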
atomic_t flow_cache_genid = ATOMIC_INIT(0);

static u32 flow_hash_shift;
#define flow_hash_size (1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static struct kmem_cache *flow_cachep __read_mostly;

static int flow_lwm, flow_hwm;

struct flow_percpu_info {
	int hash_rnd_recalc;
	u32 hash_rnd;
	int count;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
	(per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
	(per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)

struct flow_flush_info {
	atomic_t cpuleft;
	struct completion completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))

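/* Timer callback: every FLOW_HASH_RND_PERIOD, ask each possible CPU to
 * pick a fresh jhash seed the next time it touches its table, then
 * re-arm the timer.
 */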
static void flow_cache_new_hashrnd(unsigned long arg)
{
	int i;

	for_each_possible_cpu(i)
		flow_hash_rnd_recalc(i) = 1;

	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);
}

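/* Drop one entry: release its object reference (if any), free the slab
 * object and decrement this CPU's entry count.
 */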
static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
{
	if (fle->object)
		atomic_dec(fle->object_ref);
	kmem_cache_free(flow_cachep, fle);
	flow_count(cpu)--;
}

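/* Walk every hash chain of @cpu's table, keep at most @shrink_to
 * entries per chain and kill the rest (shrink_to == 0 empties the
 * table).
 */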
static void __flow_cache_shrink(int cpu, int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_hash_size; i++) {
		int k = 0;

		flp = &flow_table(cpu)[i];
		while ((fle = *flp) != NULL && k < shrink_to) {
			k++;
			flp = &fle->next;
		}
		while ((fle = *flp) != NULL) {
			*flp = fle->next;
			flow_entry_kill(cpu, fle);
		}
	}
}

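/* Trim this CPU's cache down to the low watermark, spread evenly
 * across the hash buckets.
 */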
static void flow_cache_shrink(int cpu)
{
	int shrink_to = flow_lwm / flow_hash_size;

	__flow_cache_shrink(cpu, shrink_to);
}

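/* Pick a new per-CPU hash seed and empty the table, since existing
 * entries would hash to the wrong buckets under the new seed.
 */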
static void flow_new_hash_rnd(int cpu)
{
	get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
	flow_hash_rnd_recalc(cpu) = 0;

	__flow_cache_shrink(cpu, 0);
}

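/* Hash the flow key with this CPU's random seed and mask the result
 * down to a bucket index.
 */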
static u32 flow_hash_code(struct flowi *key, int cpu)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
		(flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

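/* Main lookup entry point.  Disables BHs and works on the local CPU's
 * table: a hit whose genid is current returns the cached object
 * (taking a reference when it is non-NULL); otherwise @resolver is
 * invoked, its result is cached in an entry when one is available, and
 * the object or an ERR_PTR() is returned.
 */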
void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
			flow_resolve_t resolver)
{
	struct flow_cache_entry *fle, **head;
	unsigned int hash;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();

	fle = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this. --RR */
	if (!flow_table(cpu))
		goto nocache;

	if (flow_hash_rnd_recalc(cpu))
		flow_new_hash_rnd(cpu);
	hash = flow_hash_code(key, cpu);

	head = &flow_table(cpu)[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0) {
			if (fle->genid == atomic_read(&flow_cache_genid)) {
				void *ret = fle->object;

				if (ret)
					atomic_inc(fle->object_ref);
				local_bh_enable();

				return ret;
			}
			break;
		}
	}

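	/* Miss: try to add a new entry for this key, shrinking the
	 * table first if we are above the high watermark.
	 */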
	if (!fle) {
		if (flow_count(cpu) > flow_hwm)
			flow_cache_shrink(cpu);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			flow_count(cpu)++;
		}
	}

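	/* Resolve the object for real and, if we did get a cache entry
	 * above, stamp it with the current generation.
	 */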
nocache:
	{
		int err;
		void *obj;
		atomic_t *obj_ref;

		err = resolver(key, family, dir, &obj, &obj_ref);

		if (fle && !err) {
			fle->genid = atomic_read(&flow_cache_genid);

			if (fle->object)
				atomic_dec(fle->object_ref);

			fle->object = obj;
			fle->object_ref = obj_ref;
			if (obj)
				atomic_inc(fle->object_ref);
		}
		local_bh_enable();

		if (err)
			obj = ERR_PTR(err);
		return obj;
	}
}

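/* Per-CPU flush tasklet: drop the reference on every cached object
 * whose genid is stale, then signal the waiter once the last CPU is
 * done.
 */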
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < flow_hash_size; i++) {
		struct flow_cache_entry *fle;

		fle = flow_table(cpu)[i];
		for (; fle; fle = fle->next) {
			unsigned genid = atomic_read(&flow_cache_genid);

			if (!fle->object || fle->genid == genid)
				continue;

			fle->object = NULL;
			atomic_dec(fle->object_ref);
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

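/* Run on the other CPUs via smp_call_function(): point that CPU's
 * flush tasklet at the shared flush info and schedule it.
 */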
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();

	tasklet = flow_flush_tasklet(cpu);
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

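/* Flush the cache on all online CPUs and wait for completion.  Must be
 * called from process context; flushes are serialized by a local mutex
 * and CPU hotplug is held off for the duration.
 */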
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

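/* Boot/hotplug-time setup for one CPU: allocate its hash table (rounded
 * up to whole pages), mark its hash seed for (re)generation and set up
 * its flush tasklet.
 */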
static void __devinit flow_cache_cpu_prepare(int cpu)
{
	struct tasklet_struct *tasklet;
	unsigned long order;

	for (order = 0;
	     (PAGE_SIZE << order) <
	     (sizeof(struct flow_cache_entry *)*flow_hash_size);
	     order++)
		/* NOTHING */;

	flow_table(cpu) = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (!flow_table(cpu))
		panic("NET: failed to allocate flow cache order %lu\n", order);

	flow_hash_rnd_recalc(cpu) = 1;
	flow_count(cpu) = 0;

	tasklet = flow_flush_tasklet(cpu);
	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}

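/* CPU hotplug notifier: once a CPU is dead, empty its table so its
 * entries (and their object references) are not leaked.
 */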
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		__flow_cache_shrink((unsigned long)hcpu, 0);
	return NOTIFY_OK;
}

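/* Module init: create the slab cache, size the per-CPU hash tables
 * (1 << 10 buckets each) and their watermarks, start the seed-rotation
 * timer, prepare every possible CPU and register the hotplug notifier.
 */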
static int __init flow_cache_init(void)
{
	int i;

	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					NULL);
	flow_hash_shift = 10;
	flow_lwm = 2 * flow_hash_size;
	flow_hwm = 4 * flow_hash_size;

	setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&flow_hash_rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(i);

	hotcpu_notifier(flow_cache_cpu, 0);
	return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);