struct fl_flow_key key;
struct list_head list;
u32 handle;
+ u32 flags;
struct rcu_head rcu;
};
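The new flags field carries the per-filter TCA_CLS_FLAGS_* bits supplied over netlink. For reference, the two bits used below are defined in include/uapi/linux/pkt_cls.h along these lines (shown here for illustration; the uapi header is authoritative):

	#define TCA_CLS_FLAGS_SKIP_HW	(1 << 0) /* don't offload filter to HW */
	#define TCA_CLS_FLAGS_SKIP_SW	(1 << 1) /* don't use filter in SW */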
struct fl_flow_key skb_key;
struct fl_flow_key skb_mkey;
+ if (!atomic_read(&head->ht.nelems))
+ return -1;
+
fl_clear_masked_range(&skb_key, &head->mask);
skb_key.indev_ifindex = skb->skb_iif;
/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
 * so do it rather here.
 */
f = rhashtable_lookup_fast(&head->ht,
fl_key_get_start(&skb_mkey, &head->mask),
head->ht_params);
- if (f) {
+ if (f && !(f->flags & TCA_CLS_FLAGS_SKIP_SW)) {
*res = f->res;
return tcf_exts_exec(skb, &f->exts, res);
}
struct cls_fl_filter *fnew;
struct nlattr *tb[TCA_FLOWER_MAX + 1];
struct fl_flow_mask mask = {};
- u32 flags = 0;
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
}
fnew->handle = handle;
- if (tb[TCA_FLOWER_FLAGS])
- flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+ if (tb[TCA_FLOWER_FLAGS]) {
+ fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+ if (!tc_flags_valid(fnew->flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+ }
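tc_flags_valid() comes from include/net/pkt_cls.h. The sketch below shows the checks it is expected to perform: reject unknown bits, and reject the combination that skips both hardware and software, since such a filter could never match anywhere. Treat it as an illustration of the intent rather than the exact upstream helper.

	static inline bool tc_flags_valid(u32 flags)
	{
		/* Only the skip-hardware / skip-software bits are meaningful here. */
		if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
			return false;

		/* Skipping both HW and SW would leave the filter nowhere to run. */
		if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
			return false;

		return true;
	}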
err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
if (err)
goto errout;
- err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
- head->ht_params);
- if (err)
- goto errout;
+ if (!(fnew->flags & TCA_CLS_FLAGS_SKIP_SW)) {
+ err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
+ head->ht_params);
+ if (err)
+ goto errout;
+ }
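With this guard, a filter marked TCA_CLS_FLAGS_SKIP_SW is never inserted into head->ht, so it can never match in the software datapath; together with the nelems check in fl_classify() above, a table of hardware-only filters costs the software fast path almost nothing. From userspace this is typically requested with the flower skip_sw / skip_hw keywords of tc filter add.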
fl_hw_replace_filter(tp,
&head->dissector,
&fnew->key,
&fnew->exts,
(unsigned long)fnew,
- flags);
+ fnew->flags);
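fl_hw_replace_filter() forwards the flags to the hardware offload path. The helper below is a hypothetical sketch (its name and placement are not part of this patch) of the kind of gate that path applies before programming the device:

	/* Hypothetical sketch: should the hardware path be skipped for this filter?
	 * TCA_CLS_FLAGS_SKIP_HW means the user asked for a software-only filter,
	 * and a driver without an ndo_setup_tc hook cannot offload at all.
	 */
	static bool fl_hw_offload_skipped(const struct net_device *dev, u32 flags)
	{
		if (flags & TCA_CLS_FLAGS_SKIP_HW)
			return true;
		if (!dev->netdev_ops->ndo_setup_tc)
			return true;
		return false;
	}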
if (fold) {
rhashtable_remove_fast(&head->ht, &fold->ht_node,
head->ht_params);
sizeof(key->tp.dst))))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
+ goto nla_put_failure;
+
if (tcf_exts_dump(skb, &f->exts))
goto nla_put_failure;