/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

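/* Number of bytes covered by a mask's active range. */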
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof(*dst) : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

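/* Allocate a flow from the slab cache and attach its first statistics
 * node; further per-CPU stats nodes are allocated lazily on first use.
 */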
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

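/* Allocate and pre-fault an array of 'n_buckets' hash list heads.
 * A flex_array is used so that large tables do not require
 * high-order page allocations.
 */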
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

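/* Release everything a flow owns: the unmasked key (if the flow is
 * identified by key rather than UFID), its actions, and every per-CPU
 * stats node that was allocated.
 */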
static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

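/* Allocate a table instance with 'new_size' buckets and a fresh random
 * hash seed, so bucket distribution differs across instances.
 */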
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

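/* Set up a flow table with two instances: one hashed by masked flow key
 * and one hashed by unique flow identifier (UFID).
 */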
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);

	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called only from the RCU
 * callback or from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

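/* Walk the table one flow at a time for Netlink dumps. '*bucket' and
 * '*last' form a cursor that the caller passes back in on the next call
 * to resume where the previous dump left off.
 */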
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

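/* Mix the flow hash with this instance's random seed before selecting a
 * bucket; n_buckets is a power of two, so masking replaces a modulo.
 */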
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

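/* Re-link every flow from 'old' into 'new'. Each flow embeds two hlist
 * nodes per table, indexed by node_ver; the new instance uses the other
 * version, so RCU readers can keep traversing the old links while the
 * copy proceeds.
 */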
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

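/* Replace both table instances with empty ones and free the old
 * instances, and all their flows, after an RCU grace period.
 */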
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

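/* Hash the bytes of 'key' that fall within 'range' as a sequence of
 * u32 words.
 */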
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

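/* Keys for tunnelled packets are compared from offset 0 so that tunnel
 * metadata is included; otherwise the tunnel portion of the key is
 * skipped.
 */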
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

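/* Compare two keys long-word-wise over [key_start, key_end), ORing the
 * XOR of each pair of words; the keys are equal iff no bit differs.
 */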
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

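/* Apply 'mask' to 'unmasked', then look the masked key up in the bucket
 * it hashes to. Checking flow->mask and the hash first keeps the full
 * key comparison off the common path.
 */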
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

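/* Look up a packet's flow by trying every mask in the table until one
 * produces a match. '*n_mask_hit' returns how many masks were tried,
 * which callers can use as a measure of lookup cost.
 */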
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow) /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

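/* Hash a unique flow identifier; UFIDs are opaque byte strings of
 * variable length.
 */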
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

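/* Count the masks currently on the table's mask list. */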
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;
		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;
	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}