/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
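
/* The per-CPU mask cache holds MC_HASH_ENTRIES entries; a lookup probes up
 * to MC_HASH_SEGS of them, indexed by successive MC_HASH_SHIFT-bit slices of
 * the skb hash. */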
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}
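
/* Copy the bytes of 'src' within 'mask->range' into 'dst', ANDing each
 * long-sized word with the corresponding word of 'mask->key'. */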
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
{
        const long *m = (const long *)((const u8 *)&mask->key +
                                       mask->range.start);
        const long *s = (const long *)((const u8 *)src +
                                       mask->range.start);
        long *d = (long *)((u8 *)dst + mask->range.start);
        int i;

        /* The memory outside of the 'mask->range' is not set since
         * further operations on 'dst' only use contents within
         * 'mask->range'. */
        for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
                *d++ = *s++ & *m++;
}
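
/* Allocate a flow along with its default (node 0) statistics block; the
 * per-node stats pointers for the remaining NUMA nodes start out NULL. */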
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct flow_stats *stats;
        int node;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->sf_acts = NULL;
        flow->mask = NULL;
        flow->stats_last_writer = NUMA_NO_NODE;

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO, 0);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        for_each_node(node)
                if (node != 0)
                        RCU_INIT_POINTER(flow->stats[node], NULL);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}
int ovs_flow_tbl_count(struct flow_table *table)
{
        return table->count;
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}
static void flow_free(struct sw_flow *flow)
{
        int node;

        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
                        kmem_cache_free(flow_stats_cache,
                                        (struct flow_stats __force *)flow->stats[node]);
        kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}
static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
        struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

        kfree(mask);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}
static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}
static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}
static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);
        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}
static void mask_array_rcu_cb(struct rcu_head *rcu)
{
        struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

        kfree(ma);
}
static struct mask_array *tbl_mask_array_alloc(int size)
{
        struct mask_array *new;

        new = kzalloc(sizeof(struct mask_array) +
                      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
        if (!new)
                return NULL;

        new->count = 0;
        new->max = size;

        return new;
}
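
/* Resize the mask array to 'size' entries, copy over the existing mask
 * pointers and publish the new array via RCU. */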
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
        struct mask_array *old;
        struct mask_array *new;

        new = tbl_mask_array_alloc(size);
        if (!new)
                return -ENOMEM;

        old = ovsl_dereference(tbl->mask_array);
        if (old) {
                int i;

                for (i = 0; i < old->max; i++)
                        new->masks[i] = old->masks[i];

                new->count = old->count;
        }
        rcu_assign_pointer(tbl->mask_array, new);

        if (old)
                call_rcu(&old->rcu, mask_array_rcu_cb);

        return 0;
}
static void tbl_mask_array_delete_mask(struct mask_array *ma,
                                       const struct sw_flow_mask *mask)
{
        int i;

        /* Delete a mask pointer from the valid section.
         *
         * Also move the last entry in its place, so there is no
         * hole in the valid section.
         *
         * Notice the last entry still points to the original mask.
         *
         * <Note>: there is a small race window that may cause a mask
         * to be missed in a search. Imagine a core is
         * walking through the array, passing the index of the mask being
         * deleted. But before it reaches the last entry, that entry is
         * overwritten by another core that is adding a new mask; now the
         * last entry will point to the new mask. In this case, the moved-up
         * last entry can be missed by the core walking the mask array.
         *
         * In case this missed mask would have led to a successful
         * lookup, hitting the race window could cause a packet to miss
         * the kernel flow cache and be sent to user space.
         */
        for (i = 0; i < ma->count; i++)
                if (mask == ovsl_dereference(ma->masks[i])) {
                        struct sw_flow_mask *last;

                        last = ovsl_dereference(ma->masks[ma->count - 1]);
                        rcu_assign_pointer(ma->masks[i], last);
                        ma->count--;
                        break;
                }
        /* Remove the deleted mask pointers from the invalid section. */
        for (i = ma->count; i < ma->max; i++)
                if (mask == ovsl_dereference(ma->masks[i]))
                        RCU_INIT_POINTER(ma->masks[i], NULL);
}
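
/* Set up an empty flow table: the per-CPU mask cache, a minimum-sized mask
 * array and a TBL_MIN_BUCKETS-bucket table instance. */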
int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti;
        struct mask_array *ma;

        table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
                                           MC_HASH_ENTRIES, __alignof__(struct mask_cache_entry));
        if (!table->mask_cache)
                return -ENOMEM;

        ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
        if (!ma)
                goto free_mask_cache;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ti)
                goto free_mask_array;

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->mask_array, ma);
        table->last_rehash = jiffies;
        table->count = 0;
        return 0;

free_mask_array:
        kfree((struct mask_array __force *)table->mask_array);
free_mask_cache:
        free_percpu(table->mask_cache);
        return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}
static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
        int i;

        if (!ti)
                return;

        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred)
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
        else
                __table_instance_destroy(ti);
}
/* No need for locking: this function is called from an RCU callback or
 * from the error path. */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        struct table_instance *ti = (struct table_instance __force *)table->ti;

        free_percpu(table->mask_cache);
        kfree((struct mask_array __force *)table->mask_array);
        table_instance_destroy(ti, false);
}
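
/* Return the next flow in 'ti' for a dump operation, resuming from the
 * position recorded in '*bucket' and '*last'. */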
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                              (hash & (ti->n_buckets - 1)));
}
static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}
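
/* Re-link every flow from 'old' into 'new' on the flipped node_ver list, so
 * that RCU readers still walking 'old' are unaffected. */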
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                hlist_for_each_entry(flow, head, hash_node[old_ver])
                        table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti);

        return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti;
        struct table_instance *new_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;

        rcu_assign_pointer(flow_table->ti, new_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;

        table_instance_destroy(old_ti, true);
        return 0;
}
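
/* Hash the key bytes in [key_start, key_end); the range is always a whole
 * number of u32 words. */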
static u32 flow_hash(const struct sw_flow_key *key, int key_start,
                     int key_end)
{
        const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return arch_fast_hash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}
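
/* Compare 'key1' and 'key2' over [key_start, key_end) in long-sized chunks;
 * returns true when the two ranges are identical. */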
static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                int key_start, int key_end)
{
        return cmp_key(&flow->key, key, key_start, key_end);
}
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}
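
/* Look up 'unmasked' under a single 'mask': mask the key, hash the masked
 * range and scan the matching hash bucket, counting the attempt in
 * '*n_mask_hit'. */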
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          struct sw_flow_mask *mask,
                                          u32 *n_mask_hit)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int key_start = mask->range.start;
        int key_end = mask->range.end;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, mask);
        hash = flow_hash(&masked_key, key_start, key_end);
        head = find_bucket(ti, hash);
        (*n_mask_hit)++;
        hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
                if (flow->mask == mask && flow->hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key,
                                        key_start, key_end))
                        return flow;
        }
        return NULL;
}
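
/* Try each mask in 'ma' in order until a flow matches; on success '*index'
 * is set to the index of the matching mask. */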
static struct sw_flow *flow_lookup(struct flow_table *tbl,
                                   struct table_instance *ti,
                                   struct mask_array *ma,
                                   const struct sw_flow_key *key,
                                   u32 *n_mask_hit,
                                   u32 *index)
{
        struct sw_flow *flow;
        int i;

        for (i = 0; i < ma->count; i++) {
                struct sw_flow_mask *mask;

                mask = rcu_dereference_ovsl(ma->masks[i]);
                if (mask) {
                        flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
                        if (flow) { /* Found */
                                *index = i;
                                return flow;
                        }
                }
        }

        return NULL;
}
/* If the cache index is outside of the valid region, update the index
 * in case the cache entry was moved up. */
static void fixup_cache_entry_index(struct mask_cache_entry *e,
                                    const struct mask_array *ma,
                                    const struct sw_flow_mask *cache)
{
        int i;

        for (i = 0; i < ma->count; i++)
                if (cache == ovsl_dereference(ma->masks[i])) {
                        e->mask_index = i;
                        return;
                }
}
/*
 * mask_cache maps a flow to a probable mask. This cache is not a tightly
 * coupled cache, which means updates to the mask list can result in
 * inconsistent entries in the mask cache.
 * This is a per-CPU cache divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 skb_hash,
                                          u32 *n_mask_hit)
{
        struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct mask_cache_entry *entries, *ce;
        struct sw_flow *flow;
        u32 hash;
        int seg;

        *n_mask_hit = 0;
        if (unlikely(!skb_hash)) {
                u32 __always_unused mask_index;

                return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
        }

        ce = NULL;
        hash = skb_hash;
        entries = this_cpu_ptr(tbl->mask_cache);

        /* Find the cache entry 'ce' to operate on. */
        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
                int index = hash & (MC_HASH_ENTRIES - 1);
                struct mask_cache_entry *e;

                e = &entries[index];
                if (e->skb_hash == skb_hash) {
                        struct sw_flow_mask *cache;
                        int i = e->mask_index;

                        if (likely(i < ma->count)) {
                                /* Valid cache entry. */
                                cache = rcu_dereference_ovsl(ma->masks[i]);
                                flow = masked_flow_lookup(ti, key, cache,
                                                          n_mask_hit);
                        } else if (i < ma->max) {
                                /* The mask may have been moved up into the
                                 * valid section; fix the index and retry. */
                                cache = rcu_dereference_ovsl(ma->masks[i]);
                                fixup_cache_entry_index(e, ma, cache);
                                flow = masked_flow_lookup(ti, key,
                                                          cache, n_mask_hit);
                        } else {
                                flow = NULL;
                        }

                        if (flow) /* Cache hit. */
                                return flow;

                        /* Cache miss. This is the best cache
                         * replacement candidate. */
                        e->skb_hash = 0;
                        ce = e;
                        break;
                }

                if (!ce || e->skb_hash < ce->skb_hash)
                        ce = e; /* A better replacement cache candidate. */

                hash >>= MC_HASH_SHIFT;
        }

        /* Cache miss, do full lookup. */
        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
        if (flow)
                ce->skb_hash = skb_hash;

        return flow;
}
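
/* Lookup without a pre-computed skb hash: bypasses the mask cache and scans
 * the mask array directly. */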
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
        u32 __always_unused n_mask_hit;
        u32 __always_unused index;

        return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct mask_array *ma;

        ma = rcu_dereference_ovsl(table->mask_array);
        return ma->count;
}
static struct table_instance *table_instance_expand(struct table_instance *ti)
{
        return table_instance_rehash(ti, ti->n_buckets * 2);
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        /* ovs-lock is required to protect mask-refcount and
         * the mask list. */
        BUG_ON(!mask->ref_count);
        mask->ref_count--;

        if (!mask->ref_count) {
                struct mask_array *ma;

                ma = ovsl_dereference(tbl->mask_array);
                /* Shrink the mask array if necessary. */
                if (ma->max > MASK_ARRAY_SIZE_MIN * 2
                    && ma->count <= ma->max / 4) {
                        tbl_mask_array_realloc(tbl, ma->max / 2);
                        ma = ovsl_dereference(tbl->mask_array);
                }

                tbl_mask_array_delete_mask(ma, mask);
                call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
        }
}
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[ti->node_ver]);
        table->count--;

        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
         * accessible as long as the RCU read lock is held. */
        flow_mask_remove(table, flow->mask);
}
static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}
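
/* Two masks are equal if they cover the same key range and have identical
 * mask bits within that range. */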
static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct mask_array *ma;
        int i;

        ma = ovsl_dereference(tbl->mask_array);
        for (i = 0; i < ma->count; i++) {
                struct sw_flow_mask *t;

                t = ovsl_dereference(ma->masks[i]);
                if (t && mask_equal(mask, t))
                        return t;
        }

        return NULL;
}
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                struct mask_array *ma;
                int err;

                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;

                mask->key = new->key;
                mask->range = new->range;

                /* Add mask to mask-list. */
                ma = ovsl_dereference(tbl->mask_array);
                if (ma->count >= ma->max) {
                        err = tbl_mask_array_realloc(tbl, ma->max +
                                                     MASK_ARRAY_SIZE_MIN);
                        if (err) {
                                kfree(mask);
                                return err;
                        }
                        ma = ovsl_dereference(tbl->mask_array);
                }

                rcu_assign_pointer(ma->masks[ma->count], mask);
                ma->count++;
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        struct sw_flow_mask *mask)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;

        flow->hash = flow_hash(&flow->key, flow->mask->range.start,
                               flow->mask->range.end);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                table_instance_destroy(ti, true);
                table->last_rehash = jiffies;
        }

        return 0;
}
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (num_possible_nodes()
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}