// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

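/* With 32-bit hashes and MC_HASH_SHIFT == 8, MC_HASH_SEGS works out to 4:
 * a lookup can probe up to four per-CPU cache slots, one per 8-bit slice
 * of the skb hash.
 */
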
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

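/* The long-at-a-time AND above assumes 'struct sw_flow_key' is sized and
 * aligned to 'long'; the BUILD_BUG_ON() checks in ovs_flow_init() below
 * enforce exactly that.
 */
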
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_stats);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per CPU counters are not atomic we can not go ahead and
	 * reset them from another CPU. To be able to still have an approximate
	 * zero based counter we store the value at reset, and subtract it
	 * later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

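/* Example of the zero-point scheme above: if a mask's summed per-CPU
 * counter reads 10 when the reset runs, masks_usage_zero_cntr stores 10,
 * and a later raw read of 15 is reported as an effective count of 5.
 */
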
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
						sizeof(u64) * size,
						__alignof__(u64));
	if (!new->masks_usage_stats) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

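/* The single kzalloc() above lays out 'struct mask_array' followed by
 * 'size' mask pointers and then 'size' u64 zero-point counters; the
 * pointer arithmetic for masks_usage_zero_cntr recovers the start of
 * that last region.
 */
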
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

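/* The shrink condition above is deliberately asymmetric: the array only
 * halves once occupancy falls to a third of capacity, so repeated
 * add/delete around a size boundary does not cause realloc thrash.
 */
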
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow the size to be 0 or a power of 2 that does not exceed
	 * the percpu allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking: this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

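/* The '& (ti->n_buckets - 1)' masking is a cheap modulo: bucket counts
 * start at TBL_MIN_BUCKETS (1024) and only ever double, so they remain
 * powers of two.
 */
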
static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure the number of hash bytes is a multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

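/* cmp_key() accumulates XOR differences one 'long' at a time instead of
 * branching per word; the keys match iff 'diffs' is still zero at the end.
 */
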
static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup on the flow table. It starts with the
 * mask at the index passed in via '*index'.
 * This function MUST be called with BH disabled due to the use
 * of CPU specific variables.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&stats->syncp);
				stats->usage_cntrs[*index]++;
				u64_stats_update_end(&stats->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++)  {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&stats->syncp);
			stats->usage_cntrs[*index]++;
			u64_stats_update_end(&stats->syncp);
			return flow;
		}
	}

	return NULL;
}

/* mask_cache maps a flow to a probable mask. This cache is not tightly
 * coupled: updates to the mask list can leave stale entries behind in
 * the mask cache.
 * This is a per-CPU cache and is divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre and post recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}

*ovs_flow_tbl_lookup(struct flow_table
*tbl
,
850 const struct sw_flow_key
*key
)
852 struct table_instance
*ti
= rcu_dereference_ovsl(tbl
->ti
);
853 struct mask_array
*ma
= rcu_dereference_ovsl(tbl
->mask_array
);
854 u32 __always_unused n_mask_hit
;
855 u32 __always_unused n_cache_hit
;
856 struct sw_flow
*flow
;
859 /* This function gets called trough the netlink interface and therefore
860 * is preemptible. However, flow_lookup() function needs to be called
861 * with BH disabled due to CPU specific variables.
864 flow
= flow_lookup(tbl
, ti
, ma
, key
, &n_mask_hit
, &n_cache_hit
, &index
);
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin_irq(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry_irq(&stats->syncp,
							   start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}