dp_sysfs_if.c \
flow.c \
loop_counter.c \
- table.c \
tunnel.c \
vlan.c \
vport.c \
dp_sysfs.h \
flow.h \
loop_counter.h \
- table.h \
tunnel.h \
vlan.h \
vport.h \
#include "datapath.h"
#include "actions.h"
#include "flow.h"
-#include "table.h"
#include "vlan.h"
+#include "tunnel.h"
#include "vport-internal_dev.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
EXPORT_SYMBOL_GPL(get_dp);
/* Must be called with genl_mutex. */
-static struct tbl *get_table_protected(struct datapath *dp)
+static struct flow_table *get_table_protected(struct datapath *dp)
{
return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
}
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
+ flow_tbl_destroy(dp->table);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
}
}
/* Called with RTNL lock. */
-int dp_detach_port(struct vport *p)
+void dp_detach_port(struct vport *p)
{
ASSERT_RTNL();
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
/* Then destroy it. */
- return vport_del(p);
+ vport_del(p);
}
/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
+ struct sw_flow *flow;
struct dp_stats_percpu *stats;
int stats_counter_off;
int error;
if (!OVS_CB(skb)->flow) {
struct sw_flow_key key;
- struct tbl_node *flow_node;
int key_len;
bool is_frag;
}
/* Look up flow. */
- flow_node = tbl_lookup(rcu_dereference(dp->table), &key, key_len,
- flow_hash(&key, key_len), flow_cmp);
- if (unlikely(!flow_node)) {
+ flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ if (unlikely(!flow)) {
struct dp_upcall_info upcall;
upcall.cmd = OVS_PACKET_CMD_MISS;
goto out;
}
- OVS_CB(skb)->flow = flow_cast(flow_node);
+ OVS_CB(skb)->flow = flow;
}
stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
/* Called with genl_mutex. */
static int flush_flows(int dp_ifindex)
{
- struct tbl *old_table;
- struct tbl *new_table;
+ struct flow_table *old_table;
+ struct flow_table *new_table;
struct datapath *dp;
dp = get_dp(dp_ifindex);
return -ENODEV;
old_table = get_table_protected(dp);
- new_table = tbl_create(TBL_MIN_BUCKETS);
+ new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
rcu_assign_pointer(dp->table, new_table);
- tbl_deferred_destroy(old_table, flow_free_tbl);
-
+ flow_tbl_deferred_destroy(old_table);
return 0;
}
flow->byte_count = 0;
}
-/* Called with genl_mutex. */
-static int expand_table(struct datapath *dp)
-{
- struct tbl *old_table = get_table_protected(dp);
- struct tbl *new_table;
-
- new_table = tbl_expand(old_table);
- if (IS_ERR(new_table)) {
- if (PTR_ERR(new_table) != -ENOSPC)
- return PTR_ERR(new_table);
- } else {
- rcu_assign_pointer(dp->table, new_table);
- tbl_deferred_destroy(old_table, NULL);
- }
-
- return 0;
-}
-
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = info->userhdr;
if (err)
goto err_flow_put;
- flow->tbl_node.hash = flow_hash(&flow->key, key_len);
+ flow->hash = flow_hash(&flow->key, key_len);
acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
err = PTR_ERR(acts);
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
int i;
- struct tbl *table = get_table_protected(dp);
+ struct flow_table *table = get_table_protected(dp);
- stats->n_flows = tbl_count(table);
+ stats->n_flows = flow_tbl_count(table);
stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
- struct tbl_node *flow_node;
struct sw_flow_key key;
struct sw_flow *flow;
struct sk_buff *reply;
struct datapath *dp;
- struct tbl *table;
- u32 hash;
+ struct flow_table *table;
int error;
int key_len;
if (!dp)
goto error;
- hash = flow_hash(&key, key_len);
table = get_table_protected(dp);
- flow_node = tbl_lookup(table, &key, key_len, hash, flow_cmp);
- if (!flow_node) {
+ flow = flow_tbl_lookup(table, &key, key_len);
+ if (!flow) {
struct sw_flow_actions *acts;
/* Bail out if we're not allowed to create a new flow. */
goto error;
/* Expand table, if necessary, to make room. */
- if (tbl_count(table) >= tbl_n_buckets(table)) {
- error = expand_table(dp);
- if (error)
- goto error;
- table = get_table_protected(dp);
+ if (flow_tbl_need_to_expand(table)) {
+ struct flow_table *new_table;
+
+ new_table = flow_tbl_expand(table);
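+ /* Expansion failure is not fatal: the flow can still be
+ * inserted into the old table. */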
+ if (!IS_ERR(new_table)) {
+ rcu_assign_pointer(dp->table, new_table);
+ flow_tbl_deferred_destroy(table);
+ table = get_table_protected(dp);
+ }
}
/* Allocate flow. */
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- error = tbl_insert(table, &flow->tbl_node, hash);
- if (error)
- goto error_free_flow;
+ flow->hash = flow_hash(&key, key_len);
+ flow_tbl_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
info->snd_seq, OVS_FLOW_CMD_NEW);
goto error;
/* Update actions. */
- flow = flow_cast(flow_node);
old_acts = rcu_dereference_protected(flow->sf_acts,
lockdep_genl_is_held());
if (a[OVS_FLOW_ATTR_ACTIONS] &&
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key;
- struct tbl_node *flow_node;
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct tbl *table;
+ struct flow_table *table;
int err;
int key_len;
return -ENODEV;
table = get_table_protected(dp);
- flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
- flow_cmp);
- if (!flow_node)
+ flow = flow_tbl_lookup(table, &key, key_len);
+ if (!flow)
return -ENOENT;
- flow = flow_cast(flow_node);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
if (IS_ERR(reply))
return PTR_ERR(reply);
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key;
- struct tbl_node *flow_node;
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct tbl *table;
+ struct flow_table *table;
int err;
int key_len;
return -ENODEV;
table = get_table_protected(dp);
- flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
- flow_cmp);
- if (!flow_node)
+ flow = flow_tbl_lookup(table, &key, key_len);
+ if (!flow)
return -ENOENT;
- flow = flow_cast(flow_node);
reply = ovs_flow_cmd_alloc_info(flow);
if (!reply)
return -ENOMEM;
- err = tbl_remove(table, flow_node);
- if (err) {
- kfree_skb(reply);
- return err;
- }
+ flow_tbl_remove(table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
return -ENODEV;
for (;;) {
- struct tbl_node *flow_node;
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
- flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
- if (!flow_node)
+ flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj);
+ if (!flow)
break;
- flow = flow_cast(flow_node);
if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_FLOW_CMD_NEW) < 0)
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
+ rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
if (!dp->table)
goto err_free_dp;
err_destroy_local_port:
dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
err_destroy_table:
- tbl_destroy(get_table_protected(dp), NULL);
+ flow_tbl_destroy(get_table_protected(dp));
err_free_dp:
kfree(dp);
err_put_module:
if (IS_ERR(reply))
goto exit_unlock;
- err = dp_detach_port(vport);
+ dp_detach_port(vport);
genl_notify(reply, genl_info_net(info), info->snd_pid,
dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
- err = flow_init();
+ err = tnl_init();
if (err)
goto error;
+ err = flow_init();
+ if (err)
+ goto error_tnl_exit;
+
err = vport_init();
if (err)
goto error_flow_exit;
vport_exit();
error_flow_exit:
flow_exit();
+error_tnl_exit:
+ tnl_exit();
error:
return err;
}
unregister_netdevice_notifier(&dp_device_notifier);
vport_exit();
flow_exit();
+ tnl_exit();
}
module_init(dp_init);
int drop_frags;
/* Flow table. */
- struct tbl __rcu *table;
+ struct flow_table __rcu *table;
/* Switch ports. */
struct vport __rcu *ports[DP_MAX_PORTS];
extern int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
void dp_process_received_packet(struct vport *, struct sk_buff *);
-int dp_detach_port(struct vport *);
+void dp_detach_port(struct vport *);
int dp_upcall(struct datapath *, struct sk_buff *, const struct dp_upcall_info *);
int dp_min_mtu(const struct datapath *dp);
void set_internal_devs_mtu(const struct datapath *dp);
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
+#include <linux/rculist.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>
return flow;
}
-void flow_free_tbl(struct tbl_node *node)
+static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
- struct sw_flow *flow = flow_cast(node);
+ return flex_array_get(table->buckets,
+ (hash & (table->n_buckets - 1)));
+}
+
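+/* The bucket array is a flex_array so that even large tables avoid
+ * high-order (multi-page) allocations; each element is an hlist_head. */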
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+ struct flex_array *buckets;
+ int i, err;
+
+ buckets = flex_array_alloc(sizeof(struct hlist_head *),
+ n_buckets, GFP_KERNEL);
+ if (!buckets)
+ return NULL;
+
+ err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+ if (err) {
+ flex_array_free(buckets);
+ return NULL;
+ }
+
+ for (i = 0; i < n_buckets; i++)
+ INIT_HLIST_HEAD((struct hlist_head *)
+ flex_array_get(buckets, i));
+
+ return buckets;
+}
+
+static void free_buckets(struct flex_array *buckets)
+{
+ flex_array_free(buckets);
+}
+
+struct flow_table *flow_tbl_alloc(int new_size)
+{
+ struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
+ table->buckets = alloc_buckets(new_size);
+
+ if (!table->buckets) {
+ kfree(table);
+ return NULL;
+ }
+ table->n_buckets = new_size;
+ table->count = 0;
+
+ return table;
+}
+
+static void flow_free(struct sw_flow *flow)
+{
flow->dead = true;
flow_put(flow);
}
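+
+/* Immediate destruction; only safe once no RCU readers can still reach
+ * the table. Otherwise use flow_tbl_deferred_destroy() below. */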
+void flow_tbl_destroy(struct flow_table *table)
+{
+ int i;
+
+ if (!table)
+ return;
+
+ for (i = 0; i < table->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head = flex_array_get(table->buckets, i);
+ struct hlist_node *node, *n;
+
+ hlist_for_each_entry_safe(flow, node, n, head, hash_node) {
+ hlist_del_init_rcu(&flow->hash_node);
+ flow_free(flow);
+ }
+ }
+
+ free_buckets(table->buckets);
+ kfree(table);
+}
+
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+ struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+
+ flow_tbl_destroy(table);
+}
+
+void flow_tbl_deferred_destroy(struct flow_table *table)
+{
+ if (!table)
+ return;
+
+ call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+}
+
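+/* Resumable iterator used by flow dumps: *bucket and *last record where
+ * the previous call stopped; returns the next flow, or NULL when the
+ * table is exhausted. */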
+struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+{
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ struct hlist_node *n;
+ int i;
+
+ while (*bucket < table->n_buckets) {
+ i = 0;
+ head = flex_array_get(table->buckets, *bucket);
+ hlist_for_each_entry_rcu(flow, n, head, hash_node) {
+ if (i < *last) {
+ i++;
+ continue;
+ }
+ *last = i + 1;
+ return flow;
+ }
+ (*bucket)++;
+ *last = 0;
+ }
+
+ return NULL;
+}
+
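+/* Allocate a table with twice as many buckets and rehash every flow into
+ * it. The caller publishes the new table with rcu_assign_pointer() and
+ * disposes of the old one with flow_tbl_deferred_destroy(). */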
+struct flow_table *flow_tbl_expand(struct flow_table *table)
+{
+ struct flow_table *new_table;
+ int n_buckets = table->n_buckets * 2;
+ int i;
+
+ new_table = flow_tbl_alloc(n_buckets);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < table->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head;
+ struct hlist_node *n, *pos;
+
+ head = flex_array_get(table->buckets, i);
+
+ hlist_for_each_entry_safe(flow, n, pos, head, hash_node) {
+ hlist_del_init_rcu(&flow->hash_node);
+ flow_tbl_insert(new_table, flow);
+ }
+ }
+
+ return new_table;
+}
+
/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
return jhash2((u32*)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed);
}
-int flow_cmp(const struct tbl_node *node, void *key2_, int len)
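+/* Lookup runs under RCU: hash the key, then walk the bucket's chain
+ * comparing hashes and full keys. */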
+struct sw_flow *flow_tbl_lookup(struct flow_table *table,
+ struct sw_flow_key *key, int key_len)
{
- const struct sw_flow_key *key1 = &flow_cast(node)->key;
- const struct sw_flow_key *key2 = key2_;
+ struct sw_flow *flow;
+ struct hlist_node *n;
+ struct hlist_head *head;
+ u32 hash;
- return !memcmp(key1, key2, len);
+ hash = flow_hash(key, key_len);
+
+ head = find_bucket(table, hash);
+ hlist_for_each_entry_rcu(flow, n, head, hash_node) {
+ if (flow->hash == hash &&
+ !memcmp(&flow->key, key, key_len)) {
+ return flow;
+ }
+ }
+ return NULL;
+}
+
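+/* Update-side helpers; writers are serialized by the caller (datapath.c
+ * modifies the flow table with genl_mutex held). */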
+void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+ struct hlist_head *head;
+
+ head = find_bucket(table, flow->hash);
+ hlist_add_head_rcu(&flow->hash_node, head);
+ table->count++;
+}
+
+void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+ if (!hlist_unhashed(&flow->hash_node)) {
+ hlist_del_init_rcu(&flow->hash_node);
+ BUG_ON(!table->count);
+ table->count--;
+ }
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
#include <linux/in6.h>
#include <linux/jiffies.h>
#include <linux/time.h>
-
+#include <linux/flex_array.h>
#include "openvswitch/datapath-protocol.h"
-#include "table.h"
struct sk_buff;
struct sw_flow {
struct rcu_head rcu;
- struct tbl_node tbl_node;
+ struct hlist_node hash_node;
+ u32 hash;
struct sw_flow_key key;
struct sw_flow_actions __rcu *sf_acts;
struct sw_flow *flow_alloc(void);
void flow_deferred_free(struct sw_flow *);
-void flow_free_tbl(struct tbl_node *);
struct sw_flow_actions *flow_actions_alloc(const struct nlattr *);
void flow_deferred_free_acts(struct sw_flow_actions *);
void flow_used(struct sw_flow *, struct sk_buff *);
u64 flow_used_time(unsigned long flow_jiffies);
-u32 flow_hash(const struct sw_flow_key *, int key_lenp);
-int flow_cmp(const struct tbl_node *, void *target, int len);
-
/* Upper bound on the length of a nlattr-formatted flow key. The longest
* nlattr-formatted flow key would be:
*
int flow_metadata_from_nlattrs(u16 *in_port, __be64 *tun_id,
const struct nlattr *);
-static inline struct sw_flow *flow_cast(const struct tbl_node *node)
+#define TBL_MIN_BUCKETS 1024
+
+struct flow_table {
+ struct flex_array *buckets;
+ unsigned int count, n_buckets;
+ struct rcu_head rcu;
+};
+
+static inline int flow_tbl_count(struct flow_table *table)
{
- return container_of(node, struct sw_flow, tbl_node);
+ return table->count;
}
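+
+/* Grow once there are more flows than buckets, i.e. once the average
+ * hash chain exceeds one entry. */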
+static inline int flow_tbl_need_to_expand(struct flow_table *table)
+{
+ return (table->count > table->n_buckets);
+}
+
+struct sw_flow *flow_tbl_lookup(struct flow_table *table,
+ struct sw_flow_key *key, int len);
+void flow_tbl_destroy(struct flow_table *table);
+void flow_tbl_deferred_destroy(struct flow_table *table);
+struct flow_table *flow_tbl_alloc(int new_size);
+struct flow_table *flow_tbl_expand(struct flow_table *table);
+void flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+u32 flow_hash(const struct sw_flow_key *key, int key_len);
+
+struct sw_flow *flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+
#endif /* flow.h */
#include <linux/list.h>
#endif
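+
+/* Kernels before 2.6.27 lack hlist_del_init_rcu(); provide a replacement
+ * using the usual rpl_ rename pattern. */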
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
+#define hlist_del_init_rcu rpl_hlist_del_init_rcu
+static inline void hlist_del_init_rcu(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ n->pprev = NULL;
+ }
+}
+#endif
+
#endif
+++ /dev/null
-/*
- * Copyright (c) 2009, 2010, 2011 Nicira Networks.
- * Distributed under the terms of the GNU GPL version 2.
- *
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
- */
-
-#include "flow.h"
-#include "datapath.h"
-#include "table.h"
-
-#include <linux/genetlink.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <asm/pgtable.h>
-
-/**
- * struct tbl_bucket - single bucket within a hash table
- * @rcu: RCU callback structure
- * @n_objs: number of objects in @objs[] array
- * @objs: array of @n_objs pointers to table nodes contained inside objects
- *
- * The expected number of objects per bucket is 1, but this allows for an
- * arbitrary number of collisions.
- */
-struct tbl_bucket {
- struct rcu_head rcu;
- unsigned int n_objs;
- struct tbl_node *objs[];
-};
-
-static struct tbl_bucket *get_bucket(struct tbl_bucket __rcu *bucket)
-{
- return rcu_dereference_check(bucket, rcu_read_lock_held() ||
- lockdep_genl_is_held());
-}
-
-static struct tbl_bucket *get_bucket_protected(struct tbl_bucket __rcu *bucket)
-{
- return rcu_dereference_protected(bucket, lockdep_genl_is_held());
-}
-
-static inline int bucket_size(int n_objs)
-{
- return sizeof(struct tbl_bucket) + sizeof(struct tbl_node *) * n_objs;
-}
-
-static struct tbl_bucket *bucket_alloc(int n_objs)
-{
- return kmalloc(bucket_size(n_objs), GFP_KERNEL);
-}
-
-static void free_buckets(struct tbl_bucket __rcu ***l1,
- unsigned int n_buckets,
- void (*free_obj)(struct tbl_node *))
-{
- unsigned int i;
-
- for (i = 0; i < n_buckets >> TBL_L1_SHIFT; i++) {
- struct tbl_bucket __rcu **l2 = l1[i];
- unsigned int j;
-
- for (j = 0; j < TBL_L2_SIZE; j++) {
- struct tbl_bucket *bucket = (struct tbl_bucket __force *)l2[j];
- if (!bucket)
- continue;
-
- if (free_obj) {
- unsigned int k;
- for (k = 0; k < bucket->n_objs; k++)
- free_obj(bucket->objs[k]);
- }
- kfree(bucket);
- }
- free_page((unsigned long)l2);
- }
- kfree(l1);
-}
-
-static struct tbl_bucket __rcu ***alloc_buckets(unsigned int n_buckets)
-{
- struct tbl_bucket __rcu ***l1;
- unsigned int i;
-
- l1 = kmalloc((n_buckets >> TBL_L1_SHIFT) * sizeof(struct tbl_bucket **),
- GFP_KERNEL);
- if (!l1)
- return NULL;
- for (i = 0; i < n_buckets >> TBL_L1_SHIFT; i++) {
- l1[i] = (struct tbl_bucket __rcu **)get_zeroed_page(GFP_KERNEL);
- if (!l1[i]) {
- free_buckets(l1, i << TBL_L1_SHIFT, NULL);
- return NULL;
- }
- }
- return l1;
-}
-
-/**
- * tbl_create - create and return a new hash table
- * @n_buckets: number of buckets in the new table
- *
- * Creates and returns a new hash table, or %NULL if memory cannot be
- * allocated. @n_buckets must be a power of 2 in the range %TBL_MIN_BUCKETS to
- * %TBL_MAX_BUCKETS.
- */
-struct tbl *tbl_create(unsigned int n_buckets)
-{
- struct tbl *table;
-
- table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- goto err;
-
- table->n_buckets = n_buckets;
- table->buckets = alloc_buckets(n_buckets);
- if (!table->buckets)
- goto err_free_table;
-
- return table;
-
-err_free_table:
- kfree(table);
-err:
- return NULL;
-}
-
-/**
- * tbl_destroy - destroy hash table and optionally the objects it contains
- * @table: table to destroy
- * @destructor: function to be called on objects at destruction time
- *
- * If a destructor is null, then the buckets in @table are destroyed
- * but not the objects within those buckets. This behavior is useful when a
- * table is being replaced by a larger or smaller one without destroying the
- * objects.
- *
- * If a destructor is not null, then it is called on the objects in @table
- * before destroying the buckets.
- */
-void tbl_destroy(struct tbl *table, void (*destructor)(struct tbl_node *))
-{
- if (!table)
- return;
-
- free_buckets(table->buckets, table->n_buckets, destructor);
- kfree(table);
-}
-
-static void destroy_table_rcu(struct rcu_head *rcu)
-{
- struct tbl *table = container_of(rcu, struct tbl, rcu);
- tbl_destroy(table, table->obj_destructor);
-}
-
-/**
- * tbl_deferred_destroy - destroy table after a RCU grace period
- * @table: table to destroy
- * @destructor: function to be called on objects at destruction time
- *
- * Calls tbl_destroy() on @table after an RCU grace period. If @destructor is
- * not null it is called on every element before the table is destroyed. */
-void tbl_deferred_destroy(struct tbl *table, void (*destructor)(struct tbl_node *))
-{
- if (!table)
- return;
-
- table->obj_destructor = destructor;
- call_rcu(&table->rcu, destroy_table_rcu);
-}
-
-static struct tbl_bucket __rcu **find_bucket(struct tbl *table, u32 hash)
-{
- unsigned int l1 = (hash & (table->n_buckets - 1)) >> TBL_L1_SHIFT;
- unsigned int l2 = hash & ((1 << TBL_L2_BITS) - 1);
- return &table->buckets[l1][l2];
-}
-
-static int search_bucket(const struct tbl_bucket *bucket, void *target, int len, u32 hash,
- int (*cmp)(const struct tbl_node *, void *, int len))
-{
- int i;
-
- for (i = 0; i < bucket->n_objs; i++) {
- struct tbl_node *obj = bucket->objs[i];
- if (obj->hash == hash && likely(cmp(obj, target, len)))
- return i;
- }
-
- return -1;
-}
-
-/**
- * tbl_lookup - searches hash table for a matching object
- * @table: hash table to search
- * @target: identifier for the object that is being searched for, will be
- * provided as an argument to @cmp when making comparisons
- * @len: length of @target in bytes, will be provided as an argument to @cmp
- * when making comparisons
- * @hash: hash of @target
- * @cmp: comparison function to match objects with the given hash, returns
- * nonzero if the objects match, zero otherwise
- *
- * Searches @table for an object identified by @target. Returns the tbl_node
- * contained in the object if successful, otherwise %NULL.
- */
-struct tbl_node *tbl_lookup(struct tbl *table, void *target, int len, u32 hash,
- int (*cmp)(const struct tbl_node *, void *, int))
-{
- struct tbl_bucket __rcu **bucketp = find_bucket(table, hash);
- struct tbl_bucket *bucket = get_bucket(*bucketp);
- int index;
-
- if (!bucket)
- return NULL;
-
- index = search_bucket(bucket, target, len, hash, cmp);
- if (index < 0)
- return NULL;
-
- return bucket->objs[index];
-}
-
-/**
- * tbl_foreach - iterate through hash table
- * @table: table to iterate
- * @callback: function to call for each entry
- * @aux: Extra data to pass to @callback
- *
- * Iterates through all of the objects in @table in hash order, passing each of
- * them in turn to @callback. If @callback returns nonzero, this terminates
- * the iteration and tbl_foreach() returns the same value. Returns 0 if
- * @callback never returns nonzero.
- *
- * This function does not try to intelligently handle the case where @callback
- * adds or removes flows in @table.
- */
-int tbl_foreach(struct tbl *table,
- int (*callback)(struct tbl_node *, void *aux), void *aux)
-{
- unsigned int n_l1 = table->n_buckets >> TBL_L1_SHIFT;
- unsigned int l1_idx;
-
- for (l1_idx = 0; l1_idx < n_l1; l1_idx++) {
- struct tbl_bucket __rcu **l2 = table->buckets[l1_idx];
- unsigned int l2_idx;
-
- for (l2_idx = 0; l2_idx < TBL_L2_SIZE; l2_idx++) {
- struct tbl_bucket *bucket;
- unsigned int i;
-
- bucket = get_bucket(l2[l2_idx]);
- if (!bucket)
- continue;
-
- for (i = 0; i < bucket->n_objs; i++) {
- int error = (*callback)(bucket->objs[i], aux);
- if (error)
- return error;
- }
- }
- }
- return 0;
-}
-
-/**
- * tbl_next - find next node in hash table
- * @table: table to iterate
- * @bucketp: On entry, hash value of bucket to start from. On exit, updated
- * to bucket to start from on next call.
- * @objp: On entry, index to start from within first bucket. On exit, updated
- * to index to start from on next call.
- *
- * Returns the next node in @table in hash order, or %NULL when no nodes remain
- * in the hash table.
- *
- * On entry, uses the values that @bucketp and @objp reference to determine
- * where to begin iteration. Use 0 for both values to begin a new iteration.
- * On exit, stores the values to pass on the next iteration into @bucketp and
- * @objp's referents.
- */
-struct tbl_node *tbl_next(struct tbl *table, u32 *bucketp, u32 *objp)
-{
- unsigned int n_l1 = table->n_buckets >> TBL_L1_SHIFT;
- u32 s_l1_idx = *bucketp >> TBL_L1_SHIFT;
- u32 s_l2_idx = *bucketp & (TBL_L2_SIZE - 1);
- u32 s_obj = *objp;
- unsigned int l1_idx;
-
- for (l1_idx = s_l1_idx; l1_idx < n_l1; l1_idx++) {
- struct tbl_bucket __rcu **l2 = table->buckets[l1_idx];
- unsigned int l2_idx;
-
- for (l2_idx = s_l2_idx; l2_idx < TBL_L2_SIZE; l2_idx++) {
- struct tbl_bucket *bucket;
-
- bucket = get_bucket_protected(l2[l2_idx]);
- if (bucket && s_obj < bucket->n_objs) {
- *bucketp = (l1_idx << TBL_L1_SHIFT) + l2_idx;
- *objp = s_obj + 1;
- return bucket->objs[s_obj];
- }
-
- s_obj = 0;
- }
- s_l2_idx = 0;
- }
- *bucketp = 0;
- *objp = 0;
- return NULL;
-}
-
-static int insert_table_flow(struct tbl_node *node, void *new_table_)
-{
- struct tbl *new_table = new_table_;
- return tbl_insert(new_table, node, node->hash);
-}
-
-/**
- * tbl_expand - create a hash table with more buckets
- * @table: table to expand
- *
- * Creates a new table containing the same objects as @table but with twice
- * as many buckets. Returns 0 if successful, otherwise a negative error. The
- * caller should free @table upon success (probably using
- * tbl_deferred_destroy()).
- */
-struct tbl *tbl_expand(struct tbl *table)
-{
- int err;
- int n_buckets = table->n_buckets * 2;
- struct tbl *new_table;
-
- if (n_buckets > TBL_MAX_BUCKETS) {
- err = -ENOSPC;
- goto error;
- }
-
- err = -ENOMEM;
- new_table = tbl_create(n_buckets);
- if (!new_table)
- goto error;
-
- if (tbl_foreach(table, insert_table_flow, new_table))
- goto error_free_new_table;
-
- return new_table;
-
-error_free_new_table:
- tbl_destroy(new_table, NULL);
-error:
- return ERR_PTR(err);
-}
-
-/**
- * tbl_n_buckets - returns the number of buckets
- * @table: table to examine
- *
- * Returns the number of buckets currently allocated in @table, useful when
- * deciding whether to expand.
- */
-int tbl_n_buckets(struct tbl *table)
-{
- return table->n_buckets;
-}
-
-static void free_bucket_rcu(struct rcu_head *rcu)
-{
- struct tbl_bucket *bucket = container_of(rcu, struct tbl_bucket, rcu);
- kfree(bucket);
-}
-
-/**
- * tbl_insert - insert object into table
- * @table: table in which to insert object
- * @target: tbl_node contained in object to insert
- * @hash: hash of object to insert
- *
- * The caller must ensure that no object considered to be identical to @target
- * already exists in @table. Returns 0 or a negative error (currently just
- * -ENOMEM).
- */
-int tbl_insert(struct tbl *table, struct tbl_node *target, u32 hash)
-{
- struct tbl_bucket __rcu **oldp = find_bucket(table, hash);
- struct tbl_bucket *old = get_bucket_protected(*oldp);
- unsigned int n = old ? old->n_objs : 0;
- struct tbl_bucket *new = bucket_alloc(n + 1);
-
- if (!new)
- return -ENOMEM;
-
- target->hash = hash;
-
- new->n_objs = n + 1;
- if (old)
- memcpy(new->objs, old->objs, n * sizeof(struct tbl_node *));
- new->objs[n] = target;
-
- rcu_assign_pointer(*oldp, new);
- if (old)
- call_rcu(&old->rcu, free_bucket_rcu);
-
- table->count++;
-
- return 0;
-}
-
-/**
- * tbl_remove - remove object from table
- * @table: table from which to remove object
- * @target: tbl_node inside of object to remove
- *
- * The caller must ensure that @target itself is in @table. (It is not
- * good enough for @table to contain a different object considered identical
- * to @target.)
- *
- * Returns 0 or a negative error (currently just -ENOMEM). Yes, it *is*
- * possible for object deletion to fail due to lack of memory.
- */
-int tbl_remove(struct tbl *table, struct tbl_node *target)
-{
- struct tbl_bucket __rcu **oldp = find_bucket(table, target->hash);
- struct tbl_bucket *old = get_bucket_protected(*oldp);
- unsigned int n = old->n_objs;
- struct tbl_bucket *new;
-
- if (n > 1) {
- unsigned int i;
-
- new = bucket_alloc(n - 1);
- if (!new)
- return -ENOMEM;
-
- new->n_objs = 0;
- for (i = 0; i < n; i++) {
- struct tbl_node *obj = old->objs[i];
- if (obj != target)
- new->objs[new->n_objs++] = obj;
- }
- WARN_ON_ONCE(new->n_objs != n - 1);
- } else {
- new = NULL;
- }
-
- rcu_assign_pointer(*oldp, new);
- call_rcu(&old->rcu, free_bucket_rcu);
-
- table->count--;
-
- return 0;
-}
-
-/**
- * tbl_count - retrieves the number of stored objects
- * @table: table to count
- *
- * Returns the number of objects that have been inserted into the hash table.
- */
-unsigned int tbl_count(struct tbl *table)
-{
- return table->count;
-}
+++ /dev/null
-/*
- * Copyright (c) 2010 Nicira Networks.
- * Distributed under the terms of the GNU GPL version 2.
- *
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
- */
-
-#ifndef TABLE_H
-#define TABLE_H 1
-
-struct tbl_bucket;
-
-struct tbl_node {
- u32 hash;
-};
-
-/**
- * struct tbl - hash table
- * @n_buckets: number of buckets (a power of 2 between %TBL_L1_SIZE and
- * %TBL_MAX_BUCKETS)
- * @buckets: pointer to @n_buckets/%TBL_L1_SIZE pointers to %TBL_L1_SIZE pointers
- * to buckets
- * @rcu: RCU callback structure
- * @obj_destructor: Called on each element when the table is destroyed.
- *
- * The @buckets array is logically an array of pointers to buckets. It is
- * broken into two levels to avoid the need to kmalloc() any object larger than
- * a single page or to use vmalloc(). @buckets is always nonnull, as is each
- * @buckets[i], but each @buckets[i][j] is nonnull only if the specified hash
- * bucket is nonempty (for 0 <= i < @n_buckets/%TBL_L1_SIZE, 0 <= j <
- * %TBL_L1_SIZE).
- */
-struct tbl {
- struct rcu_head rcu;
- unsigned int n_buckets;
- struct tbl_bucket __rcu ***buckets;
- unsigned int count;
- void (*obj_destructor)(struct tbl_node *);
-};
-
-#define TBL_L2_BITS (PAGE_SHIFT - ilog2(sizeof(struct tbl_bucket *)))
-#define TBL_L2_SIZE (1 << TBL_L2_BITS)
-#define TBL_L2_SHIFT 0
-
-#define TBL_L1_BITS (PAGE_SHIFT - ilog2(sizeof(struct tbl_bucket **)))
-#define TBL_L1_SIZE (1 << TBL_L1_BITS)
-#define TBL_L1_SHIFT TBL_L2_BITS
-
-/* For 4 kB pages, this is 1,024 on 32-bit or 512 on 64-bit. */
-#define TBL_MIN_BUCKETS TBL_L2_SIZE
-
-/* For 4 kB pages, this is 1,048,576 on 32-bit or 262,144 on 64-bit. */
-#define TBL_MAX_BUCKETS (TBL_L1_SIZE * TBL_L2_SIZE)
-
-struct tbl *tbl_create(unsigned int n_buckets);
-void tbl_destroy(struct tbl *, void (*destructor)(struct tbl_node *));
-struct tbl_node *tbl_lookup(struct tbl *, void *target, int len, u32 hash,
- int (*cmp)(const struct tbl_node *, void *target, int len));
-int tbl_insert(struct tbl *, struct tbl_node *, u32 hash);
-int tbl_remove(struct tbl *, struct tbl_node *);
-unsigned int tbl_count(struct tbl *);
-int tbl_foreach(struct tbl *,
- int (*callback)(struct tbl_node *, void *aux), void *aux);
-struct tbl_node *tbl_next(struct tbl *, u32 *bucketp, u32 *objp);
-
-int tbl_n_buckets(struct tbl *);
-struct tbl *tbl_expand(struct tbl *);
-void tbl_deferred_destroy(struct tbl *, void (*destructor)(struct tbl_node *));
-
-#endif /* table.h */
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/jhash.h>
+#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
+#include <linux/rculist.h>
#include <net/dsfield.h>
#include <net/dst.h>
#include "actions.h"
#include "checksum.h"
#include "datapath.h"
-#include "table.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"
#define CACHE_CLEANER_INTERVAL (5 * HZ)
#define CACHE_DATA_ALIGN 16
+#define PORT_TABLE_SIZE 1024
-static struct tbl __rcu *port_table __read_mostly;
+static struct hlist_head *port_table __read_mostly;
+static int port_table_count;
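+
+/* Unlike the flow table, the tunnel port table is a fixed-size array of
+ * PORT_TABLE_SIZE buckets allocated in tnl_init(); it never expands. */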
static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
return vport_from_priv(tnl_vport);
}
-static inline struct tnl_vport *tnl_vport_table_cast(const struct tbl_node *node)
-{
- return container_of(node, struct tnl_vport, tbl_node);
-}
-
/* This is analogous to rtnl_dereference for the tunnel cache. It checks that
* cache_lock is held, so it is only for update side code.
*/
* Modifies 'lookup' to store the rcu_dereferenced pointer that was used to do
* the comparison.
*/
-static int port_cmp(const struct tbl_node *node, void *target, int unused)
+static int port_cmp(const struct tnl_vport *tnl_vport,
+ struct port_lookup_key *lookup)
{
- const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
- struct port_lookup_key *lookup = target;
-
lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);
return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
return port_hash(&lookup);
}
-static void check_table_empty(void)
-{
- struct tbl *old_table = rtnl_dereference(port_table);
- if (tbl_count(old_table) == 0) {
- cancel_delayed_work_sync(&cache_cleaner_wq);
- rcu_assign_pointer(port_table, NULL);
- tbl_deferred_destroy(old_table, NULL);
- }
+static inline struct hlist_head *find_bucket(u32 hash)
+{
+ return &port_table[hash & (PORT_TABLE_SIZE - 1)];
}
-static int add_port(struct vport *vport)
+static void port_table_add_port(struct vport *vport)
{
- struct tbl *cur_table = rtnl_dereference(port_table);
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- int err;
-
- if (!port_table) {
- struct tbl *new_table;
-
- new_table = tbl_create(TBL_MIN_BUCKETS);
- if (!new_table)
- return -ENOMEM;
+ u32 hash = mutable_hash(rtnl_dereference(tnl_vport->mutable));
- rcu_assign_pointer(port_table, new_table);
+ if (port_table_count == 0)
schedule_cache_cleaner();
- } else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
- struct tbl *new_table;
-
- new_table = tbl_expand(cur_table);
- if (IS_ERR(new_table)) {
- if (PTR_ERR(new_table) != -ENOSPC)
- return PTR_ERR(new_table);
- } else {
- rcu_assign_pointer(port_table, new_table);
- tbl_deferred_destroy(cur_table, NULL);
- }
- }
-
- err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
- mutable_hash(rtnl_dereference(tnl_vport->mutable)));
- if (err) {
- check_table_empty();
- return err;
- }
+ hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
+ port_table_count++;
(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
-
- return 0;
}
-static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
+static void port_table_move_port(struct vport *vport,
+ struct tnl_mutable_config *new_mutable)
{
- int err;
- struct tbl *cur_table = rtnl_dereference(port_table);
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
u32 hash;
hash = mutable_hash(new_mutable);
- if (hash == tnl_vport->tbl_node.hash)
- goto table_updated;
+ hlist_del_init_rcu(&tnl_vport->hash_node);
+ hlist_add_head_rcu(&tnl_vport->hash_node, find_bucket(hash));
- /*
- * Ideally we should make this move atomic to avoid having gaps in
- * finding tunnels or the possibility of failure. However, if we do
- * find a tunnel it will always be consistent.
- */
- err = tbl_remove(cur_table, &tnl_vport->tbl_node);
- if (err)
- return err;
-
- err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
- if (err) {
- (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
- check_table_empty();
- return err;
- }
-
-table_updated:
(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
assign_config_rcu(vport, new_mutable);
(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
-
- return 0;
}
-static int del_port(struct vport *vport)
+static void port_table_remove_port(struct vport *vport)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- int err;
- err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);
- if (err)
- return err;
+ hlist_del_init_rcu(&tnl_vport->hash_node);
+
+ port_table_count--;
+ if (port_table_count == 0)
+ cancel_delayed_work_sync(&cache_cleaner_wq);
- check_table_empty();
(*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
+}
- return 0;
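+/* Walk one bucket under RCU. A successful port_cmp() also stores the
+ * matching mutable config in 'lookup' for the caller. */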
+static struct tnl_vport *port_table_lookup(struct port_lookup_key *lookup)
+{
+ struct hlist_node *n;
+ struct hlist_head *bucket;
+ u32 hash = port_hash(lookup);
+ struct tnl_vport *tnl_vport;
+
+ bucket = find_bucket(hash);
+
+ hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node) {
+ if (port_cmp(tnl_vport, lookup))
+ return tnl_vport;
+ }
+
+ return NULL;
}
struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
const struct tnl_mutable_config **mutable)
{
struct port_lookup_key lookup;
- struct tbl *table = rcu_dereference_rtnl(port_table);
- struct tbl_node *tbl_node;
-
- if (unlikely(!table))
- return NULL;
+ struct tnl_vport *tnl_vport;
lookup.saddr = saddr;
lookup.daddr = daddr;
lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
if (key_local_remote_ports) {
- tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
- port_hash(&lookup), port_cmp);
- if (tbl_node)
+ tnl_vport = port_table_lookup(&lookup);
+ if (tnl_vport)
goto found;
}
if (key_remote_ports) {
lookup.saddr = 0;
-
- tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
- port_hash(&lookup), port_cmp);
- if (tbl_node)
+ tnl_vport = port_table_lookup(&lookup);
+ if (tnl_vport)
goto found;
lookup.saddr = saddr;
lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
if (local_remote_ports) {
- tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
- port_hash(&lookup), port_cmp);
- if (tbl_node)
+ tnl_vport = port_table_lookup(&lookup);
+ if (tnl_vport)
goto found;
}
if (remote_ports) {
lookup.saddr = 0;
-
- tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
- port_hash(&lookup), port_cmp);
- if (tbl_node)
+ tnl_vport = port_table_lookup(&lookup);
+ if (tnl_vport)
goto found;
}
}
found:
*mutable = lookup.mutable;
- return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
+ return tnl_vport_to_vport(tnl_vport);
}
static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
(cache->flow && !cache->flow->dead));
}
-static int cache_cleaner_cb(struct tbl_node *tbl_node, void *aux)
+static void __cache_cleaner(struct tnl_vport *tnl_vport)
{
- struct tnl_vport *tnl_vport = tnl_vport_table_cast(tbl_node);
- const struct tnl_mutable_config *mutable = rcu_dereference(tnl_vport->mutable);
+ const struct tnl_mutable_config *mutable =
+ rcu_dereference(tnl_vport->mutable);
const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
if (cache && !check_cache_valid(cache, mutable) &&
assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
spin_unlock_bh(&tnl_vport->cache_lock);
}
-
- return 0;
}
static void cache_cleaner(struct work_struct *work)
{
+ int i;
+
schedule_cache_cleaner();
rcu_read_lock();
- tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
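+ /* Walk every bucket and drop any cached tunnel state that has
+ * become invalid. */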
+ for (i = 0; i < PORT_TABLE_SIZE; i++) {
+ struct hlist_node *n;
+ struct hlist_head *bucket;
+ struct tnl_vport *tnl_vport;
+
+ bucket = &port_table[i];
+ hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
+ __cache_cleaner(tnl_vport);
+ }
rcu_read_unlock();
}
if (is_internal_dev(rt_dst(rt).dev)) {
struct sw_flow_key flow_key;
- struct tbl_node *flow_node;
struct vport *dst_vport;
struct sk_buff *skb;
bool is_frag;
int err;
int flow_key_len;
+ struct sw_flow *flow;
dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
if (!dst_vport)
if (err || is_frag)
goto done;
- flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
- &flow_key, flow_key_len,
- flow_hash(&flow_key, flow_key_len),
- flow_cmp);
- if (flow_node) {
- struct sw_flow *flow = flow_cast(flow_node);
-
+ flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
+ &flow_key, flow_key_len);
+ if (flow) {
cache->flow = flow;
flow_hold(flow);
}
rcu_assign_pointer(tnl_vport->mutable, mutable);
- err = add_port(vport);
- if (err)
- goto error_free_mutable;
-
+ port_table_add_port(vport);
return vport;
error_free_mutable:
if (err)
goto error_free;
- err = move_port(vport, mutable);
- if (err)
- goto error_free;
+ if (mutable_hash(mutable) != mutable_hash(old_mutable))
+ port_table_move_port(vport, mutable);
+ else
+ assign_config_rcu(vport, mutable);
return 0;
vport_free(tnl_vport_to_vport(tnl_vport));
}
-int tnl_destroy(struct vport *vport)
+void tnl_destroy(struct vport *vport)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- const struct tnl_mutable_config *mutable, *old_mutable;
+ const struct tnl_mutable_config *mutable;
mutable = rtnl_dereference(tnl_vport->mutable);
-
- if (vport == tnl_find_port(mutable->saddr, mutable->daddr,
- mutable->in_key, mutable->tunnel_type,
- &old_mutable))
- del_port(vport);
-
+ port_table_remove_port(vport);
call_rcu(&tnl_vport->rcu, free_port_rcu);
-
- return 0;
}
int tnl_set_addr(struct vport *vport, const unsigned char *addr)
skb = next;
}
}
+
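+/* The port table is allocated in tnl_init() and freed in tnl_exit(); by
+ * module exit every tunnel port must already have been destroyed, hence
+ * the BUG() below. */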
+int tnl_init(void)
+{
+ int i;
+
+ port_table = kmalloc(PORT_TABLE_SIZE * sizeof(struct hlist_head),
+ GFP_KERNEL);
+ if (!port_table)
+ return -ENOMEM;
+
+ for (i = 0; i < PORT_TABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&port_table[i]);
+
+ return 0;
+}
+
+void tnl_exit(void)
+{
+ int i;
+
+ for (i = 0; i < PORT_TABLE_SIZE; i++) {
+ struct tnl_vport *tnl_vport;
+ struct hlist_head *hash_head;
+ struct hlist_node *n;
+
+ hash_head = &port_table[i];
+ hlist_for_each_entry(tnl_vport, n, hash_head, hash_node) {
+ BUG();
+ goto out;
+ }
+ }
+out:
+ kfree(port_table);
+}
#include "flow.h"
#include "openvswitch/tunnel.h"
-#include "table.h"
#include "vport.h"
/*
struct tnl_vport {
struct rcu_head rcu;
- struct tbl_node tbl_node;
+ struct hlist_node hash_node;
char name[IFNAMSIZ];
const struct tnl_ops *tnl_ops;
struct vport *tnl_create(const struct vport_parms *, const struct vport_ops *,
const struct tnl_ops *);
-int tnl_destroy(struct vport *);
+void tnl_destroy(struct vport *);
int tnl_set_options(struct vport *, struct nlattr *);
int tnl_get_options(const struct vport *, struct sk_buff *);
struct sk_buff *skb, unsigned int mtu, __be64 flow_key);
void tnl_free_linked_skbs(struct sk_buff *skb);
+int tnl_init(void);
+void tnl_exit(void);
static inline struct tnl_vport *tnl_vport_priv(const struct vport *vport)
{
return vport_priv(vport);
return ERR_PTR(err);
}
-static int internal_dev_destroy(struct vport *vport)
+static void internal_dev_destroy(struct vport *vport)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
/* unregister_netdevice() waits for an RCU grace period. */
unregister_netdevice(netdev_vport->dev);
-
- return 0;
}
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
return ERR_PTR(err);
}
-static int netdev_destroy(struct vport *vport)
+static void netdev_destroy(struct vport *vport)
{
struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
dev_put(netdev_vport->dev);
vport_free(vport);
-
- return 0;
}
int netdev_set_mtu(struct vport *vport, int mtu)
vport_free(vport_from_priv(patch_vport));
}
-static int patch_destroy(struct vport *vport)
+static void patch_destroy(struct vport *vport)
{
struct patch_vport *patch_vport = patch_vport_priv(vport);
update_peers(patch_vport->name, NULL);
hlist_del(&patch_vport->hash_node);
call_rcu(&patch_vport->rcu, free_port_rcu);
-
- return 0;
}
static int patch_set_options(struct vport *vport, struct nlattr *options)
* Detaches @vport from its datapath and destroys it. It is possible to fail
* for reasons such as lack of memory. RTNL lock must be held.
*/
-int vport_del(struct vport *vport)
+void vport_del(struct vport *vport)
{
ASSERT_RTNL();
hlist_del_rcu(&vport->hash_node);
- return vport->ops->destroy(vport);
+ vport->ops->destroy(vport);
}
/**
void vport_exit(void);
struct vport *vport_add(const struct vport_parms *);
-int vport_del(struct vport *);
+void vport_del(struct vport *);
struct vport *vport_locate(const char *name);
/* Called with RTNL lock. */
struct vport *(*create)(const struct vport_parms *);
- int (*destroy)(struct vport *);
+ void (*destroy)(struct vport *);
int (*set_options)(struct vport *, struct nlattr *);
int (*get_options)(const struct vport *, struct sk_buff *);