/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#include "flow_netlink.h"
#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
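
/* With MC_HASH_SHIFT == 8 the per-CPU mask cache has 1u << 8 == 256
 * entries, probed in 32 / 8 == 4 segments, one per byte of the 32-bit
 * skb hash.
 */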
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}
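
/* Apply 'mask' to 'src' and store the result in 'dst', one long-sized
 * word at a time.  Illustrative example: if the mask covers only the
 * upper 16 bits of an IPv4 source address, the masked key keeps those
 * bits and zeroes the rest, so all flows in that /16 collapse onto one
 * masked key.
 */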
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
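
/* Allocate a flow with no actions, no mask and an empty identifier.
 * Every per-node stats pointer is NULL except node 0, which gets a
 * zeroed, preallocated stat entry.
 */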
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->id.ufid_len = 0;
	flow->id.unmasked_key = NULL;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}
int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
static void flow_free(struct sw_flow *flow)
{
	int node;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					rcu_dereference_raw(flow->stats[node]));
	kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}
static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

	kfree(mask);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);

	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	kfree(ma);
}
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->count = 0;
	new->max = size;

	return new;
}
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i, count = 0;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[count++] = old->masks[i];
		}

		new->count = count;
	}
	rcu_assign_pointer(tbl->mask_array, new);

	if (old)
		call_rcu(&old->rcu, mask_array_rcu_cb);

	return 0;
}
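
/* Set up a flow table: a per-CPU mask cache, a minimum-sized mask array
 * and two table instances, one hashed by masked key and one hashed by
 * unique flow identifier (UFID).
 */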
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_array *ma;

	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
					   MC_HASH_ENTRIES,
					   __alignof__(struct mask_cache_entry));
	if (!table->mask_cache)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	kfree(ma);
free_mask_cache:
	free_percpu(table->mask_cache);
	return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}
/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	free_percpu(table->mask_cache);
	kfree(rcu_dereference_raw(table->mask_array));
	table_instance_destroy(ti, ufid_ti, false);
}
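
/* Dump cursor: '*bucket' is the bucket to resume from and '*last' is the
 * 1-based position of the last flow returned within that bucket.  Both
 * are updated so the caller can pass them back to continue the dump;
 * a NULL return means the table has been fully traversed.
 */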
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
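
/* 'n_buckets' is always a power of two (TBL_MIN_BUCKETS, doubled on each
 * expansion), so 'hash & (n_buckets - 1)' selects a bucket uniformly.
 * Re-hashing with the per-instance 'hash_seed' keeps bucket placement
 * different across table instances.
 */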
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
			      (hash & (ti->n_buckets - 1)));
}
static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}
static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}
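
/* Hash only the u32 words of 'key' that lie inside 'range'.  Keys masked
 * with the same mask are hashed over exactly the same span of bytes, so
 * equal masked keys always land in the same bucket.
 */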
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.u.ipv4.dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}
static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}
/* Flow lookup does a full lookup on the flow table.  It starts with the
 * mask at the index passed in through '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   const struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *index)
{
	struct sw_flow_mask *mask;
	struct sw_flow *flow;
	int i;

	if (*index < ma->max) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow)
				return flow;
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			return flow;
		}
	}

	return NULL;
}
/* mask_cache maps a flow to its probable mask.  The cache is not tightly
 * coupled to the mask list, so updates to the mask list can leave stale
 * entries behind in the cache.
 * The cache is per-CPU and divided into MC_HASH_SEGS segments; on a hash
 * collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit)
{
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	if (unlikely(!skb_hash)) {
		u32 mask_index = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value.  To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(tbl->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (MC_HASH_ENTRIES - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;	/* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	return flow;
}
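
/* A typical call site, as a sketch only (the real caller lives in the
 * datapath receive path; 'dp' and 'key' here are illustrative):
 *
 *	u32 n_mask_hit;
 *	struct sw_flow *flow;
 *
 *	flow = ovs_flow_tbl_lookup_stats(&dp->table, &key,
 *					 skb_get_hash(skb), &n_mask_hit);
 */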
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = ovsl_dereference(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}

	return NULL;
}
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}
static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}
bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma;

	ma = rcu_dereference_ovsl(table->mask_array);
	return ma->count;
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
static void tbl_mask_array_delete_mask(struct mask_array *ma,
				       struct sw_flow_mask *mask)
{
	int i;

	/* Remove the deleted mask pointers from the array. */
	for (i = 0; i < ma->max; i++) {
		if (mask == ovsl_dereference(ma->masks[i])) {
			RCU_INIT_POINTER(ma->masks[i], NULL);
			ma->count--;
			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
			return;
		}
	}
	BUG();
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			struct mask_array *ma;

			ma = ovsl_dereference(tbl->mask_array);
			tbl_mask_array_delete_mask(ma, mask);

			/* Shrink the mask array if necessary. */
			if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
			    ma->count <= (ma->max / 3))
				tbl_mask_array_realloc(tbl, ma->max / 2);
		}
	}
}
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}
static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}
static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		struct mask_array *ma;
		int i;

		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;

		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to the mask list. */
		ma = ovsl_dereference(tbl->mask_array);
		if (ma->count >= ma->max) {
			int err;

			err = tbl_mask_array_realloc(tbl, ma->max +
						     MASK_ARRAY_SIZE_MIN);
			if (err) {
				kfree(mask);
				return err;
			}
			ma = ovsl_dereference(tbl->mask_array);
		}

		for (i = 0; i < ma->max; i++) {
			struct sw_flow_mask *t;

			t = ovsl_dereference(ma->masks[i]);
			if (!t) {
				rcu_assign_pointer(ma->masks[i], mask);
				ma->count++;
				break;
			}
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}
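
/* Sketch of the insert path as seen by a caller (illustrative only;
 * error handling and Netlink plumbing elided, 'dp' is hypothetical):
 *
 *	struct sw_flow *flow = ovs_flow_alloc();
 *
 *	ovs_lock();
 *	err = ovs_flow_tbl_insert(&dp->table, flow, &mask);
 *	if (err)
 *		ovs_flow_free(flow, false);
 *	ovs_unlock();
 */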
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_node_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}