/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
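/* Cache geometry (a worked example derived from the constants above, not a
 * comment from the original source): with MC_HASH_SHIFT = 8 the per-CPU mask
 * cache has MC_HASH_ENTRIES = 256 slots, and a 32-bit skb hash yields
 * MC_HASH_SEGS = 32 / 8 = 4 probe segments.  Each segment consumes 8 bits of
 * the hash, so a lookup probes at most four cache slots before falling back
 * to a full scan of the mask array.
 */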
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set, since further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
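/* Illustrative use (hypothetical values, not from the original source): a
 * mask whose range is {start = 0, end = 16} restricts masking, hashing and
 * comparison to the first 16 bytes of the key, so
 *
 *	ovs_flow_mask_key(&masked, &unmasked, mask);
 *	hash = flow_hash(&masked, mask->range.start, mask->range.end);
 *
 * only ANDs and hashes those 16 bytes.  The rest of 'masked' stays
 * uninitialized, which is safe because later code only reads the range.
 */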
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}
int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}
static void flow_free(struct sw_flow *flow)
{
	int node;

	kfree(rcu_dereference_raw(flow->sf_acts));
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					rcu_dereference_raw(flow->stats[node]));
	kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}
static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

	kfree(mask);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	kfree(ma);
}
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->count = 0;
	new->max = size;

	return new;
}
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i, count = 0;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[count++] = old->masks[i];
		}

		new->count = count;
	}
	rcu_assign_pointer(tbl->mask_array, new);

	if (old)
		call_rcu(&old->rcu, mask_array_rcu_cb);

	return 0;
}
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;
	struct mask_array *ma;

	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
					   MC_HASH_ENTRIES,
					   __alignof__(struct mask_cache_entry));
	if (!table->mask_cache)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->mask_array, ma);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;

free_mask_array:
	kfree(ma);
free_mask_cache:
	free_percpu(table->mask_cache);
	return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}
static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}
/* No need for locking: this function is called from an RCU callback or
 * from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);

	free_percpu(table->mask_cache);
	kfree(rcu_dereference_raw(table->mask_array));
	table_instance_destroy(ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}
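/* n_buckets is always a power of two (TBL_MIN_BUCKETS is 1024 and
 * table_instance_expand() doubles it), so 'hash & (n_buckets - 1)' is a cheap
 * modulo.  jhash_1word() folds in the per-instance random hash_seed first, so
 * different table instances distribute the same flows differently.
 */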
static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}
static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}
static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}
/* Flow lookup does a full lookup on the flow table.  It starts with the
 * mask at the index passed in *index.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   const struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *index)
{
	struct sw_flow_mask *mask;
	struct sw_flow *flow;
	int i;

	if (*index < ma->max) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow)
				return flow;
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			return flow;
		}
	}

	return NULL;
}
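/* The lookup above is two-pass: the mask at *index (typically the index
 * remembered by a per-CPU cache entry) is tried first, and only on a miss are
 * the remaining masks scanned.  When the scan hits, *index is updated so the
 * caller's cache entry points at the mask that matched.
 */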
/*
 * mask_cache maps a flow to its probable mask.  The cache is not tightly
 * coupled to the mask list, so updates to the mask list can leave stale
 * entries in the cache.
 * The cache is per CPU and is divided into MC_HASH_SEGS segments; in case of
 * a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit)
{
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	if (unlikely(!skb_hash)) {
		u32 mask_index = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value.  To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(tbl->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (MC_HASH_ENTRIES - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	return flow;
}
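/* Probe sketch (hypothetical numbers, not from the original source): with
 * skb_hash = 0x1a2b3c4d the four segment indexes are 0x4d, 0x3c, 0x2b and
 * 0x1a, taking 8 bits at a time from low to high via hash >>= MC_HASH_SHIFT.
 * The first slot whose cached skb_hash matches is reused; otherwise the slot
 * with the smallest skb_hash seen is kept as 'ce' and refreshed after the
 * full lookup succeeds.
 */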
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = ovsl_dereference(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}

	return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma;

	ma = rcu_dereference_ovsl(table->mask_array);
	return ma->count;
}
static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}
static void tbl_mask_array_delete_mask(struct mask_array *ma,
				       struct sw_flow_mask *mask)
{
	int i;

	/* Remove the deleted mask pointers from the array. */
	for (i = 0; i < ma->max; i++) {
		if (mask == ovsl_dereference(ma->masks[i])) {
			RCU_INIT_POINTER(ma->masks[i], NULL);
			ma->count--;
			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
			return;
		}
	}
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			struct mask_array *ma;

			ma = ovsl_dereference(tbl->mask_array);
			tbl_mask_array_delete_mask(ma, mask);

			/* Shrink the mask array if necessary. */
			if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
			    ma->count <= (ma->max / 3))
				tbl_mask_array_realloc(tbl, ma->max / 2);
		}
	}
}
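/* Shrink heuristic (worked example derived from the code above, not a comment
 * from the original source): with ma->max = 64 the array is reallocated down
 * to 32 slots once at most 64 / 3 = 21 masks remain in use.  Since
 * tbl_mask_array_alloc() clamps the requested size, the array never drops
 * below MASK_ARRAY_SIZE_MIN slots.
 */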
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}
static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}
static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		struct mask_array *ma;
		int i;

		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;

		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		ma = ovsl_dereference(tbl->mask_array);
		if (ma->count >= ma->max) {
			int err;

			err = tbl_mask_array_realloc(tbl, ma->max +
						     MASK_ARRAY_SIZE_MIN);
			if (err) {
				kfree(mask);
				return err;
			}
			ma = ovsl_dereference(tbl->mask_array);
		}

		for (i = 0; i < ma->max; i++) {
			struct sw_flow_mask *t;

			t = ovsl_dereference(ma->masks[i]);
			if (!t) {
				rcu_assign_pointer(ma->masks[i], mask);
				break;
			}
		}

		ma->count++;
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
}
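/* Growth policy: the bucket array is doubled as soon as the flow count
 * exceeds the number of buckets; otherwise the table is rehashed at the same
 * size (picking a fresh random hash seed) once REHASH_INTERVAL, i.e. ten
 * minutes, has passed since the last rehash.
 */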
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow_key_insert(table, flow);

	return 0;
}
/* Initializes the flow module.
 *
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}