/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

/* Slab cache used for all struct sw_flow allocations. */
static struct kmem_cache *flow_cache;
54 static u16
range_n_bytes(const struct sw_flow_key_range
*range
)
56 return range
->end
- range
->start
;
59 void ovs_flow_mask_key(struct sw_flow_key
*dst
, const struct sw_flow_key
*src
,
60 const struct sw_flow_mask
*mask
)
62 const long *m
= (long *)((u8
*)&mask
->key
+ mask
->range
.start
);
63 const long *s
= (long *)((u8
*)src
+ mask
->range
.start
);
64 long *d
= (long *)((u8
*)dst
+ mask
->range
.start
);
67 /* The memory outside of the 'mask->range' are not set since
68 * further operations on 'dst' only uses contents within
71 for (i
= 0; i
< range_n_bytes(&mask
->range
); i
+= sizeof(long))
75 struct sw_flow
*ovs_flow_alloc(void)
79 flow
= kmem_cache_alloc(flow_cache
, GFP_KERNEL
);
81 return ERR_PTR(-ENOMEM
);
83 spin_lock_init(&flow
->lock
);
90 int ovs_flow_tbl_count(struct flow_table
*table
)
95 static struct flex_array
*alloc_buckets(unsigned int n_buckets
)
97 struct flex_array
*buckets
;
100 buckets
= flex_array_alloc(sizeof(struct hlist_head
),
101 n_buckets
, GFP_KERNEL
);
105 err
= flex_array_prealloc(buckets
, 0, n_buckets
, GFP_KERNEL
);
107 flex_array_free(buckets
);
111 for (i
= 0; i
< n_buckets
; i
++)
112 INIT_HLIST_HEAD((struct hlist_head
*)
113 flex_array_get(buckets
, i
));
118 static void flow_free(struct sw_flow
*flow
)
120 kfree((struct sf_flow_acts __force
*)flow
->sf_acts
);
121 kmem_cache_free(flow_cache
, flow
);
124 static void rcu_free_flow_callback(struct rcu_head
*rcu
)
126 struct sw_flow
*flow
= container_of(rcu
, struct sw_flow
, rcu
);
131 static void rcu_free_sw_flow_mask_cb(struct rcu_head
*rcu
)
133 struct sw_flow_mask
*mask
= container_of(rcu
, struct sw_flow_mask
, rcu
);
138 static void flow_mask_del_ref(struct sw_flow_mask
*mask
, bool deferred
)
143 BUG_ON(!mask
->ref_count
);
146 if (!mask
->ref_count
) {
147 list_del_rcu(&mask
->list
);
149 call_rcu(&mask
->rcu
, rcu_free_sw_flow_mask_cb
);
155 void ovs_flow_free(struct sw_flow
*flow
, bool deferred
)
160 flow_mask_del_ref(flow
->mask
, deferred
);
163 call_rcu(&flow
->rcu
, rcu_free_flow_callback
);
/* Release the bucket flex_array. */
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
173 static void __table_instance_destroy(struct table_instance
*ti
)
180 for (i
= 0; i
< ti
->n_buckets
; i
++) {
181 struct sw_flow
*flow
;
182 struct hlist_head
*head
= flex_array_get(ti
->buckets
, i
);
183 struct hlist_node
*n
;
184 int ver
= ti
->node_ver
;
186 hlist_for_each_entry_safe(flow
, n
, head
, hash_node
[ver
]) {
187 hlist_del(&flow
->hash_node
[ver
]);
188 ovs_flow_free(flow
, false);
193 free_buckets(ti
->buckets
);
197 static struct table_instance
*table_instance_alloc(int new_size
)
199 struct table_instance
*ti
= kmalloc(sizeof(*ti
), GFP_KERNEL
);
204 ti
->buckets
= alloc_buckets(new_size
);
210 ti
->n_buckets
= new_size
;
212 ti
->keep_flows
= false;
213 get_random_bytes(&ti
->hash_seed
, sizeof(u32
));
218 int ovs_flow_tbl_init(struct flow_table
*table
)
220 struct table_instance
*ti
;
222 ti
= table_instance_alloc(TBL_MIN_BUCKETS
);
227 rcu_assign_pointer(table
->ti
, ti
);
228 INIT_LIST_HEAD(&table
->mask_list
);
229 table
->last_rehash
= jiffies
;
234 static void flow_tbl_destroy_rcu_cb(struct rcu_head
*rcu
)
236 struct table_instance
*ti
= container_of(rcu
, struct table_instance
, rcu
);
238 __table_instance_destroy(ti
);
241 static void table_instance_destroy(struct table_instance
*ti
, bool deferred
)
247 call_rcu(&ti
->rcu
, flow_tbl_destroy_rcu_cb
);
249 __table_instance_destroy(ti
);
252 void ovs_flow_tbl_destroy(struct flow_table
*table
)
254 struct table_instance
*ti
= ovsl_dereference(table
->ti
);
256 table_instance_destroy(ti
, false);
259 struct sw_flow
*ovs_flow_tbl_dump_next(struct table_instance
*ti
,
260 u32
*bucket
, u32
*last
)
262 struct sw_flow
*flow
;
263 struct hlist_head
*head
;
268 while (*bucket
< ti
->n_buckets
) {
270 head
= flex_array_get(ti
->buckets
, *bucket
);
271 hlist_for_each_entry_rcu(flow
, head
, hash_node
[ver
]) {
286 static struct hlist_head
*find_bucket(struct table_instance
*ti
, u32 hash
)
288 hash
= jhash_1word(hash
, ti
->hash_seed
);
289 return flex_array_get(ti
->buckets
,
290 (hash
& (ti
->n_buckets
- 1)));
293 static void table_instance_insert(struct table_instance
*ti
, struct sw_flow
*flow
)
295 struct hlist_head
*head
;
297 head
= find_bucket(ti
, flow
->hash
);
298 hlist_add_head_rcu(&flow
->hash_node
[ti
->node_ver
], head
);
301 static void flow_table_copy_flows(struct table_instance
*old
,
302 struct table_instance
*new)
307 old_ver
= old
->node_ver
;
308 new->node_ver
= !old_ver
;
310 /* Insert in new table. */
311 for (i
= 0; i
< old
->n_buckets
; i
++) {
312 struct sw_flow
*flow
;
313 struct hlist_head
*head
;
315 head
= flex_array_get(old
->buckets
, i
);
317 hlist_for_each_entry(flow
, head
, hash_node
[old_ver
])
318 table_instance_insert(new, flow
);
321 old
->keep_flows
= true;
324 static struct table_instance
*table_instance_rehash(struct table_instance
*ti
,
327 struct table_instance
*new_ti
;
329 new_ti
= table_instance_alloc(n_buckets
);
333 flow_table_copy_flows(ti
, new_ti
);
338 int ovs_flow_tbl_flush(struct flow_table
*flow_table
)
340 struct table_instance
*old_ti
;
341 struct table_instance
*new_ti
;
343 old_ti
= ovsl_dereference(flow_table
->ti
);
344 new_ti
= table_instance_alloc(TBL_MIN_BUCKETS
);
348 rcu_assign_pointer(flow_table
->ti
, new_ti
);
349 flow_table
->last_rehash
= jiffies
;
350 flow_table
->count
= 0;
352 table_instance_destroy(old_ti
, true);
356 static u32
flow_hash(const struct sw_flow_key
*key
, int key_start
,
359 u32
*hash_key
= (u32
*)((u8
*)key
+ key_start
);
360 int hash_u32s
= (key_end
- key_start
) >> 2;
362 /* Make sure number of hash bytes are multiple of u32. */
363 BUILD_BUG_ON(sizeof(long) % sizeof(u32
));
365 return jhash2(hash_key
, hash_u32s
, 0);
368 static int flow_key_start(const struct sw_flow_key
*key
)
370 if (key
->tun_key
.ipv4_dst
)
373 return rounddown(offsetof(struct sw_flow_key
, phy
),
377 static bool cmp_key(const struct sw_flow_key
*key1
,
378 const struct sw_flow_key
*key2
,
379 int key_start
, int key_end
)
381 const long *cp1
= (long *)((u8
*)key1
+ key_start
);
382 const long *cp2
= (long *)((u8
*)key2
+ key_start
);
386 for (i
= key_start
; i
< key_end
; i
+= sizeof(long))
387 diffs
|= *cp1
++ ^ *cp2
++;
392 static bool flow_cmp_masked_key(const struct sw_flow
*flow
,
393 const struct sw_flow_key
*key
,
394 int key_start
, int key_end
)
396 return cmp_key(&flow
->key
, key
, key_start
, key_end
);
399 bool ovs_flow_cmp_unmasked_key(const struct sw_flow
*flow
,
400 struct sw_flow_match
*match
)
402 struct sw_flow_key
*key
= match
->key
;
403 int key_start
= flow_key_start(key
);
404 int key_end
= match
->range
.end
;
406 return cmp_key(&flow
->unmasked_key
, key
, key_start
, key_end
);
409 static struct sw_flow
*masked_flow_lookup(struct table_instance
*ti
,
410 const struct sw_flow_key
*unmasked
,
411 struct sw_flow_mask
*mask
)
413 struct sw_flow
*flow
;
414 struct hlist_head
*head
;
415 int key_start
= mask
->range
.start
;
416 int key_end
= mask
->range
.end
;
418 struct sw_flow_key masked_key
;
420 ovs_flow_mask_key(&masked_key
, unmasked
, mask
);
421 hash
= flow_hash(&masked_key
, key_start
, key_end
);
422 head
= find_bucket(ti
, hash
);
423 hlist_for_each_entry_rcu(flow
, head
, hash_node
[ti
->node_ver
]) {
424 if (flow
->mask
== mask
&&
425 flow_cmp_masked_key(flow
, &masked_key
,
432 struct sw_flow
*ovs_flow_tbl_lookup(struct flow_table
*tbl
,
433 const struct sw_flow_key
*key
)
435 struct table_instance
*ti
= rcu_dereference(tbl
->ti
);
436 struct sw_flow_mask
*mask
;
437 struct sw_flow
*flow
;
439 list_for_each_entry_rcu(mask
, &tbl
->mask_list
, list
) {
440 flow
= masked_flow_lookup(ti
, key
, mask
);
441 if (flow
) /* Found */
447 static struct table_instance
*table_instance_expand(struct table_instance
*ti
)
449 return table_instance_rehash(ti
, ti
->n_buckets
* 2);
452 void ovs_flow_tbl_remove(struct flow_table
*table
, struct sw_flow
*flow
)
454 struct table_instance
*ti
= ovsl_dereference(table
->ti
);
456 BUG_ON(table
->count
== 0);
457 hlist_del_rcu(&flow
->hash_node
[ti
->node_ver
]);
461 static struct sw_flow_mask
*mask_alloc(void)
463 struct sw_flow_mask
*mask
;
465 mask
= kmalloc(sizeof(*mask
), GFP_KERNEL
);
472 static void mask_add_ref(struct sw_flow_mask
*mask
)
477 static bool mask_equal(const struct sw_flow_mask
*a
,
478 const struct sw_flow_mask
*b
)
480 u8
*a_
= (u8
*)&a
->key
+ a
->range
.start
;
481 u8
*b_
= (u8
*)&b
->key
+ b
->range
.start
;
483 return (a
->range
.end
== b
->range
.end
)
484 && (a
->range
.start
== b
->range
.start
)
485 && (memcmp(a_
, b_
, range_n_bytes(&a
->range
)) == 0);
488 static struct sw_flow_mask
*flow_mask_find(const struct flow_table
*tbl
,
489 const struct sw_flow_mask
*mask
)
491 struct list_head
*ml
;
493 list_for_each(ml
, &tbl
->mask_list
) {
494 struct sw_flow_mask
*m
;
495 m
= container_of(ml
, struct sw_flow_mask
, list
);
496 if (mask_equal(mask
, m
))
504 * add a new mask into the mask list.
505 * The caller needs to make sure that 'mask' is not the same
506 * as any masks that are already on the list.
508 static int flow_mask_insert(struct flow_table
*tbl
, struct sw_flow
*flow
,
509 struct sw_flow_mask
*new)
511 struct sw_flow_mask
*mask
;
512 mask
= flow_mask_find(tbl
, new);
514 /* Allocate a new mask if none exsits. */
518 mask
->key
= new->key
;
519 mask
->range
= new->range
;
520 list_add_rcu(&mask
->list
, &tbl
->mask_list
);
528 int ovs_flow_tbl_insert(struct flow_table
*table
, struct sw_flow
*flow
,
529 struct sw_flow_mask
*mask
)
531 struct table_instance
*new_ti
= NULL
;
532 struct table_instance
*ti
;
535 err
= flow_mask_insert(table
, flow
, mask
);
539 flow
->hash
= flow_hash(&flow
->key
, flow
->mask
->range
.start
,
540 flow
->mask
->range
.end
);
541 ti
= ovsl_dereference(table
->ti
);
542 table_instance_insert(ti
, flow
);
545 /* Expand table, if necessary, to make room. */
546 if (table
->count
> ti
->n_buckets
)
547 new_ti
= table_instance_expand(ti
);
548 else if (time_after(jiffies
, table
->last_rehash
+ REHASH_INTERVAL
))
549 new_ti
= table_instance_rehash(ti
, ti
->n_buckets
);
552 rcu_assign_pointer(table
->ti
, new_ti
);
553 table_instance_destroy(ti
, true);
554 table
->last_rehash
= jiffies
;
559 /* Initializes the flow module.
560 * Returns zero if successful or a negative error code. */
561 int ovs_flow_init(void)
563 BUILD_BUG_ON(__alignof__(struct sw_flow_key
) % __alignof__(long));
564 BUILD_BUG_ON(sizeof(struct sw_flow_key
) % sizeof(long));
566 flow_cache
= kmem_cache_create("sw_flow", sizeof(struct sw_flow
), 0,
568 if (flow_cache
== NULL
)
574 /* Uninitializes the flow module. */
575 void ovs_flow_exit(void)
577 kmem_cache_destroy(flow_cache
);