*
* @cookie: implementation specific element cookie
* @key: element key
- * @data: element data (maps only)
- * @flags: element flags (end of interval)
+ * @priv: element private data and extensions
*
* The cookie can be used to store a handle to the element for subsequent
* removal.
struct nft_set_elem {
void *cookie;
struct nft_data key;
- struct nft_data data;
- u32 flags;
+ void *priv;
};
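
With data and flags gone from struct nft_set_elem, everything except the key is now reached through the element's extension area via @priv. As a rough orientation only (a hedged sketch, not part of the patch; it assumes the nft_set_ext_* helpers behave as they are used elsewhere in this diff, and the key extension is always templated in, as nft_add_set_elem() below does), a callback handed a struct nft_set_elem would read an element like this:

	/* example_read_elem() is purely illustrative */
	static void example_read_elem(const struct nft_set *set,
				      const struct nft_set_elem *elem)
	{
		const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);

		/* the key lives in the extension area as well */
		const struct nft_data *key = nft_set_ext_key(ext);

		/* mapping data exists only when NFT_SET_EXT_DATA was templated in */
		if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
			pr_debug("key %p maps to data %p\n",
				 key, nft_set_ext_data(ext));

		/* interval-end marker, formerly elem->flags */
		if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
		    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
			pr_debug("end of interval\n");
	}

The dump and loop-check hunks below follow the same pattern.
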
struct nft_set;
* @destroy: destroy private data of set instance
* @list: nf_tables_set_ops list node
* @owner: module reference
+ * @elemsize: element private data size
* @features: features supported by the implementation
*/
struct nft_set_ops {
struct list_head list;
struct module *owner;
+ unsigned int elemsize;
u32 features;
};
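
@elemsize tells the core how many bytes of backend-private data sit in front of each element's extension area: nft_set_elem_init() (added below) allocates elemsize + tmpl->len bytes per element, and nft_set_elem_ext() locates the extensions by adding elemsize to the element pointer. A backend therefore places a struct nft_set_ext as the last member of its element and reports its offset, exactly as the hash and rbtree conversions at the end of this patch do. A minimal sketch (the names example_elem and example_ops are hypothetical):

	struct example_elem {
		struct rb_node		node;	/* backend-private part */
		struct nft_set_ext	ext;	/* extensions start here; must be last */
	};

	static struct nft_set_ops example_ops __read_mostly = {
		.elemsize	= offsetof(struct example_elem, ext),
		/* .lookup/.insert/.remove etc. elided */
	};

With this layout, nft_set_elem_ext(set, elem) is simply &((struct example_elem *)elem)->ext.
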
return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
}
+static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
+ void *elem)
+{
+ return elem + set->ops->elemsize;
+}
+
/**
* struct nft_expr_type - nf_tables expression type
*
const struct nft_set_iter *iter,
const struct nft_set_elem *elem)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
enum nft_registers dreg;
dreg = nft_type_to_reg(set->dtype);
- return nft_validate_data_load(ctx, dreg, &elem->data,
+ return nft_validate_data_load(ctx, dreg, nft_set_ext_data(ext),
set->dtype == NFT_DATA_VERDICT ?
NFT_DATA_VERDICT : NFT_DATA_VALUE);
}
const struct nft_set *set,
const struct nft_set_elem *elem)
{
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
if (nest == NULL)
goto nla_put_failure;
- if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
- set->klen) < 0)
+ if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, nft_set_ext_key(ext),
+ NFT_DATA_VALUE, set->klen) < 0)
goto nla_put_failure;
- if (set->flags & NFT_SET_MAP &&
- !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
- nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
+ nft_data_dump(skb, NFTA_SET_ELEM_DATA, nft_set_ext_data(ext),
set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
set->dlen) < 0)
goto nla_put_failure;
- if (elem->flags != 0)
- if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
- goto nla_put_failure;
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+ nla_put_be32(skb, NFTA_SET_ELEM_FLAGS,
+ htonl(*nft_set_ext_flags(ext))))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
return 0;
return trans;
}
+static void *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const struct nft_data *key,
+ const struct nft_data *data,
+ gfp_t gfp)
+{
+ struct nft_set_ext *ext;
+ void *elem;
+
+ elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
+ if (elem == NULL)
+ return NULL;
+
+ ext = nft_set_elem_ext(set, elem);
+ nft_set_ext_init(ext, tmpl);
+
+ memcpy(nft_set_ext_key(ext), key, set->klen);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ memcpy(nft_set_ext_data(ext), data, set->dlen);
+
+ return elem;
+}
+
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
const struct nlattr *attr)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_data_desc d1, d2;
+ struct nft_set_ext_tmpl tmpl;
+ struct nft_set_ext *ext;
struct nft_set_elem elem;
struct nft_set_binding *binding;
+ struct nft_data data;
enum nft_registers dreg;
struct nft_trans *trans;
+ u32 flags;
int err;
if (set->size && set->nelems == set->size)
if (nla[NFTA_SET_ELEM_KEY] == NULL)
return -EINVAL;
- elem.flags = 0;
+ nft_set_ext_prepare(&tmpl);
+
+ flags = 0;
if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
- elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
- if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
+ flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
+ if (flags & ~NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
if (!(set->flags & NFT_SET_INTERVAL) &&
- elem.flags & NFT_SET_ELEM_INTERVAL_END)
+ flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
+ if (flags != 0)
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
}
if (set->flags & NFT_SET_MAP) {
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
- !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
+ !(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
if (nla[NFTA_SET_ELEM_DATA] != NULL &&
- elem.flags & NFT_SET_ELEM_INTERVAL_END)
+ flags & NFT_SET_ELEM_INTERVAL_END)
return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
if (set->ops->get(set, &elem) == 0)
goto err2;
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_KEY);
+
if (nla[NFTA_SET_ELEM_DATA] != NULL) {
- err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
+ err = nft_data_init(ctx, &data, &d2, nla[NFTA_SET_ELEM_DATA]);
if (err < 0)
goto err2;
};
err = nft_validate_data_load(&bind_ctx, dreg,
- &elem.data, d2.type);
+ &data, d2.type);
if (err < 0)
goto err3;
}
+
+ nft_set_ext_add(&tmpl, NFT_SET_EXT_DATA);
}
+ err = -ENOMEM;
+ elem.priv = nft_set_elem_init(set, &tmpl, &elem.key, &data, GFP_KERNEL);
+ if (elem.priv == NULL)
+ goto err3;
+
+ ext = nft_set_elem_ext(set, elem.priv);
+ if (flags)
+ *nft_set_ext_flags(ext) = flags;
+
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
if (trans == NULL)
- goto err3;
+ goto err4;
err = set->ops->insert(set, &elem);
if (err < 0)
- goto err4;
+ goto err5;
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
-err4:
+err5:
kfree(trans);
+err4:
+ kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
- nft_data_uninit(&elem.data, d2.type);
+ nft_data_uninit(&data, d2.type);
err2:
nft_data_uninit(&elem.key, d1.type);
err1:
struct net *net = sock_net(skb->sk);
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
+ struct nft_set_ext *ext;
/* Bump generation counter, invalidate any dump in progress */
while (++net->nft.base_seq == 0);
break;
case NFT_MSG_DELSETELEM:
te = (struct nft_trans_elem *)trans->data;
+ ext = nft_set_elem_ext(te->set, te->elem.priv);
+
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
NFT_MSG_DELSETELEM, 0);
te->set->ops->get(te->set, &te->elem);
nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
- if (te->set->flags & NFT_SET_MAP &&
- !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_uninit(&te->elem.data, te->set->dtype);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ nft_data_uninit(nft_set_ext_data(ext),
+ te->set->dtype);
te->set->ops->remove(te->set, &te->elem);
nft_trans_destroy(trans);
break;
struct net *net = sock_net(skb->sk);
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
+ struct nft_set_ext *ext;
list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
switch (trans->msg_type) {
case NFT_MSG_NEWSETELEM:
nft_trans_elem_set(trans)->nelems--;
te = (struct nft_trans_elem *)trans->data;
+ ext = nft_set_elem_ext(te->set, te->elem.priv);
+
te->set->ops->get(te->set, &te->elem);
nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
- if (te->set->flags & NFT_SET_MAP &&
- !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_uninit(&te->elem.data, te->set->dtype);
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
+ nft_data_uninit(nft_set_ext_data(ext),
+ te->set->dtype);
te->set->ops->remove(te->set, &te->elem);
nft_trans_destroy(trans);
break;
const struct nft_set_iter *iter,
const struct nft_set_elem *elem)
{
- if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
+ const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ const struct nft_data *data;
+
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+ *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
return 0;
- switch (elem->data.verdict) {
+ data = nft_set_ext_data(ext);
+ switch (data->verdict) {
case NFT_JUMP:
case NFT_GOTO:
- return nf_tables_check_loops(ctx, elem->data.chain);
+ return nf_tables_check_loops(ctx, data->chain);
default:
return 0;
}
struct nft_hash_elem {
struct rhash_head node;
- struct nft_data key;
- struct nft_data data[];
+ struct nft_set_ext ext;
};
struct nft_hash_cmp_arg {
{
const struct nft_hash_elem *he = data;
- return jhash(&he->key, len, seed);
+ return jhash(nft_set_ext_key(&he->ext), len, seed);
}
static inline int nft_hash_cmp(struct rhashtable_compare_arg *arg,
const struct nft_hash_cmp_arg *x = arg->key;
const struct nft_hash_elem *he = ptr;
- if (nft_data_cmp(&he->key, x->key, x->set->klen))
+ if (nft_data_cmp(nft_set_ext_key(&he->ext), x->key, x->set->klen))
return 1;
return 0;
}
he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
if (he && set->flags & NFT_SET_MAP)
- nft_data_copy(data, he->data);
+ nft_data_copy(data, nft_set_ext_data(&he->ext));
return !!he;
}
const struct nft_set_elem *elem)
{
struct nft_hash *priv = nft_set_priv(set);
- struct nft_hash_elem *he;
+ struct nft_hash_elem *he = elem->priv;
struct nft_hash_cmp_arg arg = {
.set = set,
.key = &elem->key,
};
- unsigned int size;
- int err;
-
- if (elem->flags != 0)
- return -EINVAL;
-
- size = sizeof(*he);
- if (set->flags & NFT_SET_MAP)
- size += sizeof(he->data[0]);
-
- he = kzalloc(size, GFP_KERNEL);
- if (he == NULL)
- return -ENOMEM;
-
- nft_data_copy(&he->key, &elem->key);
- if (set->flags & NFT_SET_MAP)
- nft_data_copy(he->data, &elem->data);
-
- err = rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
- nft_hash_params);
- if (err)
- kfree(he);
- return err;
+ return rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node,
+ nft_hash_params);
}
static void nft_hash_elem_destroy(const struct nft_set *set,
struct nft_hash_elem *he)
{
- nft_data_uninit(&he->key, NFT_DATA_VALUE);
+ nft_data_uninit(nft_set_ext_key(&he->ext), NFT_DATA_VALUE);
if (set->flags & NFT_SET_MAP)
- nft_data_uninit(he->data, set->dtype);
+ nft_data_uninit(nft_set_ext_data(&he->ext), set->dtype);
kfree(he);
}
if (!he)
return -ENOENT;
- elem->cookie = he;
- elem->flags = 0;
- if (set->flags & NFT_SET_MAP)
- nft_data_copy(&elem->data, he->data);
+ elem->priv = he;
return 0;
}
struct nft_set_iter *iter)
{
struct nft_hash *priv = nft_set_priv(set);
- const struct nft_hash_elem *he;
+ struct nft_hash_elem *he;
struct rhashtable_iter hti;
struct nft_set_elem elem;
int err;
if (iter->count < iter->skip)
goto cont;
- memcpy(&elem.key, &he->key, sizeof(elem.key));
- if (set->flags & NFT_SET_MAP)
- memcpy(&elem.data, he->data, sizeof(elem.data));
- elem.flags = 0;
+ elem.priv = he;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0)
unsigned int esize;
esize = sizeof(struct nft_hash_elem);
- if (features & NFT_SET_MAP)
- esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
-
if (desc->size) {
est->size = sizeof(struct nft_hash) +
roundup_pow_of_two(desc->size * 4 / 3) *
static struct nft_set_ops nft_hash_ops __read_mostly = {
.privsize = nft_hash_privsize,
+ .elemsize = offsetof(struct nft_hash_elem, ext),
.estimate = nft_hash_estimate,
.init = nft_hash_init,
.destroy = nft_hash_destroy,
struct nft_rbtree_elem {
struct rb_node node;
- u16 flags;
- struct nft_data key;
- struct nft_data data[];
+ struct nft_set_ext ext;
};
static bool nft_rbtree_lookup(const struct nft_set *set,
while (parent != NULL) {
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = nft_data_cmp(&rbe->key, key, set->klen);
+ d = nft_data_cmp(nft_set_ext_key(&rbe->ext), key, set->klen);
if (d < 0) {
parent = parent->rb_left;
interval = rbe;
parent = parent->rb_right;
else {
found:
- if (rbe->flags & NFT_SET_ELEM_INTERVAL_END)
+ if (nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
+ *nft_set_ext_flags(&rbe->ext) &
+ NFT_SET_ELEM_INTERVAL_END)
goto out;
if (set->flags & NFT_SET_MAP)
- nft_data_copy(data, rbe->data);
+ nft_data_copy(data, nft_set_ext_data(&rbe->ext));
spin_unlock_bh(&nft_rbtree_lock);
return true;
static void nft_rbtree_elem_destroy(const struct nft_set *set,
struct nft_rbtree_elem *rbe)
{
- nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
+ nft_data_uninit(nft_set_ext_key(&rbe->ext), NFT_DATA_VALUE);
if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_uninit(rbe->data, set->dtype);
+ nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_DATA))
+ nft_data_uninit(nft_set_ext_data(&rbe->ext), set->dtype);
kfree(rbe);
}
while (*p != NULL) {
parent = *p;
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = nft_data_cmp(&rbe->key, &new->key, set->klen);
+ d = nft_data_cmp(nft_set_ext_key(&rbe->ext),
+ nft_set_ext_key(&new->ext),
+ set->klen);
if (d < 0)
p = &parent->rb_left;
else if (d > 0)
static int nft_rbtree_insert(const struct nft_set *set,
const struct nft_set_elem *elem)
{
- struct nft_rbtree_elem *rbe;
- unsigned int size;
+ struct nft_rbtree_elem *rbe = elem->priv;
int err;
- size = sizeof(*rbe);
- if (set->flags & NFT_SET_MAP &&
- !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
- size += sizeof(rbe->data[0]);
-
- rbe = kzalloc(size, GFP_KERNEL);
- if (rbe == NULL)
- return -ENOMEM;
-
- rbe->flags = elem->flags;
- nft_data_copy(&rbe->key, &elem->key);
- if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_copy(rbe->data, &elem->data);
-
spin_lock_bh(&nft_rbtree_lock);
err = __nft_rbtree_insert(set, rbe);
- if (err < 0)
- kfree(rbe);
-
spin_unlock_bh(&nft_rbtree_lock);
+
return err;
}
while (parent != NULL) {
rbe = rb_entry(parent, struct nft_rbtree_elem, node);
- d = nft_data_cmp(&rbe->key, &elem->key, set->klen);
+ d = nft_data_cmp(nft_set_ext_key(&rbe->ext), &elem->key,
+ set->klen);
if (d < 0)
parent = parent->rb_left;
else if (d > 0)
parent = parent->rb_right;
else {
elem->cookie = rbe;
- if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_copy(&elem->data, rbe->data);
- elem->flags = rbe->flags;
+ elem->priv = rbe;
return 0;
}
}
struct nft_set_iter *iter)
{
const struct nft_rbtree *priv = nft_set_priv(set);
- const struct nft_rbtree_elem *rbe;
+ struct nft_rbtree_elem *rbe;
struct nft_set_elem elem;
struct rb_node *node;
goto cont;
rbe = rb_entry(node, struct nft_rbtree_elem, node);
- nft_data_copy(&elem.key, &rbe->key);
- if (set->flags & NFT_SET_MAP &&
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_copy(&elem.data, rbe->data);
- elem.flags = rbe->flags;
+ elem.priv = rbe;
iter->err = iter->fn(ctx, set, iter, &elem);
if (iter->err < 0) {
unsigned int nsize;
nsize = sizeof(struct nft_rbtree_elem);
- if (features & NFT_SET_MAP)
- nsize += FIELD_SIZEOF(struct nft_rbtree_elem, data[0]);
-
if (desc->size)
est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
else
static struct nft_set_ops nft_rbtree_ops __read_mostly = {
.privsize = nft_rbtree_privsize,
+ .elemsize = offsetof(struct nft_rbtree_elem, ext),
.estimate = nft_rbtree_estimate,
.init = nft_rbtree_init,
.destroy = nft_rbtree_destroy,