#define MAX_PEDIT_OFFSETS 32
+#ifndef TCM_IFINDEX_MAGIC_BLOCK
+#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU)
+#endif
+
+#if TCA_MAX < 14
+#define TCA_INGRESS_BLOCK 13
+#endif
+
VLOG_DEFINE_THIS_MODULE(tc);
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
+/* One entry of the flower<->pedit translation table: 'htype' selects the
+ * pedit header type to rewrite, 'flower_offset' is the field's byte offset
+ * within the flower rewrite key/mask buffers, 'offset' is the pedit byte
+ * offset (within the packet header -- TODO confirm against users of this
+ * table outside this chunk), and 'size' is the field width in bytes. */
struct flower_key_to_pedit {
enum pedit_header_type htype;
-    int flower_offset;
int offset;
+    int flower_offset;
int size;
};
},
};
+static inline int
+csum_update_flag(struct tc_flower *flower,
+ enum pedit_header_type htype);
+
struct tcmsg *
tc_make_request(int ifindex, int type, unsigned int flags,
struct ofpbuf *request)
* The configuration and stats may be seen with the following command:
* /sbin/tc -s qdisc show dev <devname>
*
+ * If block_id is greater than 0, then the ingress qdisc is added to a block.
+ * In this case, it is equivalent to running (when 'add' is true):
+ * /sbin/tc qdisc add dev <devname> ingress_block <block_id> ingress
+ *
* Returns 0 if successful, otherwise a positive errno value.
*/
int
-tc_add_del_ingress_qdisc(int ifindex, bool add)
+tc_add_del_ingress_qdisc(int ifindex, bool add, uint32_t block_id)
{
struct ofpbuf request;
struct tcmsg *tcmsg;
tcmsg->tcm_parent = TC_H_INGRESS;
nl_msg_put_string(&request, TCA_KIND, "ingress");
nl_msg_put_unspec(&request, TCA_OPTIONS, NULL, 0);
+ if (block_id) {
+ nl_msg_put_u32(&request, TCA_INGRESS_BLOCK, block_id);
+ }
error = tc_transact(&request, NULL);
if (error) {
.optional = true, },
[TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NL_A_U16,
.optional = true, },
+ [TCA_FLOWER_KEY_FLAGS] = { .type = NL_A_BE32, .optional = true, },
+ [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NL_A_BE32, .optional = true, },
[TCA_FLOWER_KEY_IP_TTL] = { .type = NL_A_U8,
.optional = true, },
[TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NL_A_U8,
.optional = true, },
+ [TCA_FLOWER_KEY_IP_TOS] = { .type = NL_A_U8,
+ .optional = true, },
+ [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NL_A_U8,
+ .optional = true, },
[TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NL_A_U16,
.optional = true, },
[TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NL_A_U16,
.optional = true, },
+ [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NL_A_U16, .optional = true, },
+ [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NL_A_U8, .optional = true, },
+ [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NL_A_U16, .optional = true, },
};
static void
+/* Parses the flower VLAN match attributes into 'flower'.  Index 0 of the
+ * vlan_id/vlan_prio/encap_eth_type arrays holds the outer tag; index 1
+ * holds the inner (customer) tag when the filter matches a QinQ packet.
+ * A no-op when the outer eth_type is not a VLAN TPID. */
static void
nl_parse_flower_vlan(struct nlattr **attrs, struct tc_flower *flower)
{
-    if (flower->key.eth_type != htons(ETH_TYPE_VLAN)) {
+    ovs_be16 encap_ethtype;
+
+    if (!eth_type_vlan(flower->key.eth_type)) {
return;
    }
-    flower->key.encap_eth_type =
+    flower->key.encap_eth_type[0] =
nl_attr_get_be16(attrs[TCA_FLOWER_KEY_ETH_TYPE]);
if (attrs[TCA_FLOWER_KEY_VLAN_ID]) {
-        flower->key.vlan_id =
+        flower->key.vlan_id[0] =
nl_attr_get_u16(attrs[TCA_FLOWER_KEY_VLAN_ID]);
    }
if (attrs[TCA_FLOWER_KEY_VLAN_PRIO]) {
-        flower->key.vlan_prio =
+        flower->key.vlan_prio[0] =
nl_attr_get_u8(attrs[TCA_FLOWER_KEY_VLAN_PRIO]);
    }
+
+    if (!attrs[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
+        return;
+    }
+
+    encap_ethtype = nl_attr_get_be16(attrs[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
+    /* A VLAN TPID as the encapsulated ethtype means a second tag: QinQ. */
+    if (!eth_type_vlan(encap_ethtype)) {
+        return;
+    }
+
+    /* Shift the single-tag encap ethtype (parsed above) into the inner
+     * slot; the outer encap ethtype becomes the inner TPID itself. */
+    flower->key.encap_eth_type[1] = flower->key.encap_eth_type[0];
+    flower->key.encap_eth_type[0] = encap_ethtype;
+
+    if (attrs[TCA_FLOWER_KEY_CVLAN_ID]) {
+        flower->key.vlan_id[1] =
+            nl_attr_get_u16(attrs[TCA_FLOWER_KEY_CVLAN_ID]);
+    }
+    if (attrs[TCA_FLOWER_KEY_CVLAN_PRIO]) {
+        flower->key.vlan_prio[1] =
+            nl_attr_get_u8(attrs[TCA_FLOWER_KEY_CVLAN_PRIO]);
+    }
}
static void
mask->ip_proto = UINT8_MAX;
}
+ if (attrs[TCA_FLOWER_KEY_FLAGS_MASK]) {
+ key->flags = ntohl(nl_attr_get_be32(attrs[TCA_FLOWER_KEY_FLAGS]));
+ mask->flags =
+ ntohl(nl_attr_get_be32(attrs[TCA_FLOWER_KEY_FLAGS_MASK]));
+ }
+
if (attrs[TCA_FLOWER_KEY_IPV4_SRC_MASK]) {
key->ipv4.ipv4_src =
nl_attr_get_be32(attrs[TCA_FLOWER_KEY_IPV4_SRC]);
key->ip_ttl = nl_attr_get_u8(attrs[TCA_FLOWER_KEY_IP_TTL]);
mask->ip_ttl = nl_attr_get_u8(attrs[TCA_FLOWER_KEY_IP_TTL_MASK]);
}
+
+ if (attrs[TCA_FLOWER_KEY_IP_TOS_MASK]) {
+ key->ip_tos = nl_attr_get_u8(attrs[TCA_FLOWER_KEY_IP_TOS]);
+ mask->ip_tos = nl_attr_get_u8(attrs[TCA_FLOWER_KEY_IP_TOS_MASK]);
+ }
+}
+
+/* Maps the kernel-reported TCA_FLOWER_FLAGS attribute to an offloaded
+ * state: NOT_IN_HW wins over IN_HW when both bits are set; UNDEFINED is
+ * returned when the attribute is absent or carries neither flag. */
+static enum tc_offloaded_state
+nl_get_flower_offloaded_state(struct nlattr **attrs)
+{
+    uint32_t flower_flags = 0;
+
+    if (attrs[TCA_FLOWER_FLAGS]) {
+        flower_flags = nl_attr_get_u32(attrs[TCA_FLOWER_FLAGS]);
+        if (flower_flags & TCA_CLS_FLAGS_NOT_IN_HW) {
+            return TC_OFFLOADED_STATE_NOT_IN_HW;
+        } else if (flower_flags & TCA_CLS_FLAGS_IN_HW) {
+            return TC_OFFLOADED_STATE_IN_HW;
+        }
+    }
+    return TC_OFFLOADED_STATE_UNDEFINED;
+}
+
+/* Records the filter's hardware-offload state (derived from the
+ * TCA_FLOWER_FLAGS attribute) into 'flower'. */
+static void
+nl_parse_flower_flags(struct nlattr **attrs, struct tc_flower *flower)
+{
+    flower->offloaded_state = nl_get_flower_offloaded_state(attrs);
}
static const struct nl_policy pedit_policy[] = {
static int
nl_parse_act_pedit(struct nlattr *options, struct tc_flower *flower)
{
+ struct tc_action *action;
struct nlattr *pe_attrs[ARRAY_SIZE(pedit_policy)];
const struct tc_pedit *pe;
const struct tc_pedit_key *keys;
char *rewrite_key = (void *) &flower->rewrite.key;
char *rewrite_mask = (void *) &flower->rewrite.mask;
size_t keys_ex_size, left;
- int type, i = 0;
+ int type, i = 0, err;
if (!nl_parse_nested(options, pedit_policy, pe_attrs,
ARRAY_SIZE(pedit_policy))) {
break;
}
- if (nl_attr_type(nla) == TCA_PEDIT_KEY_EX) {
+ if (nl_attr_type(nla) != TCA_PEDIT_KEY_EX) {
VLOG_ERR_RL(&error_rl, "unable to parse legacy pedit type: %d",
nl_attr_type(nla));
return EOPNOTSUPP;
ex_type = nl_attr_find_nested(nla, TCA_PEDIT_KEY_EX_HTYPE);
type = nl_attr_get_u16(ex_type);
+ err = csum_update_flag(flower, type);
+ if (err) {
+ return err;
+ }
+
for (int j = 0; j < ARRAY_SIZE(flower_pedit_map); j++) {
struct flower_key_to_pedit *m = &flower_pedit_map[j];
int flower_off = m->flower_offset;
i++;
}
- flower->rewrite.rewrite = true;
+ action = &flower->actions[flower->action_count++];
+ action->type = TC_ACT_PEDIT;
return 0;
}
struct nlattr *tun_attrs[ARRAY_SIZE(tunnel_key_policy)];
const struct nlattr *tun_parms;
const struct tc_tunnel_key *tun;
+ struct tc_action *action;
if (!nl_parse_nested(options, tunnel_key_policy, tun_attrs,
ARRAY_SIZE(tunnel_key_policy))) {
struct nlattr *ipv6_src = tun_attrs[TCA_TUNNEL_KEY_ENC_IPV6_SRC];
struct nlattr *ipv6_dst = tun_attrs[TCA_TUNNEL_KEY_ENC_IPV6_DST];
- flower->set.set = true;
- flower->set.ipv4.ipv4_src = ipv4_src ? nl_attr_get_be32(ipv4_src) : 0;
- flower->set.ipv4.ipv4_dst = ipv4_dst ? nl_attr_get_be32(ipv4_dst) : 0;
+ action = &flower->actions[flower->action_count++];
+ action->type = TC_ACT_ENCAP;
+ action->encap.ipv4.ipv4_src = ipv4_src ? nl_attr_get_be32(ipv4_src) : 0;
+ action->encap.ipv4.ipv4_dst = ipv4_dst ? nl_attr_get_be32(ipv4_dst) : 0;
if (ipv6_src) {
- flower->set.ipv6.ipv6_src = nl_attr_get_in6_addr(ipv6_src);
+ action->encap.ipv6.ipv6_src = nl_attr_get_in6_addr(ipv6_src);
}
if (ipv6_dst) {
- flower->set.ipv6.ipv6_dst = nl_attr_get_in6_addr(ipv6_dst);
+ action->encap.ipv6.ipv6_dst = nl_attr_get_in6_addr(ipv6_dst);
}
- flower->set.id = id ? be32_to_be64(nl_attr_get_be32(id)) : 0;
- flower->set.tp_dst = dst_port ? nl_attr_get_be16(dst_port) : 0;
+ action->encap.id = id ? be32_to_be64(nl_attr_get_be32(id)) : 0;
+ action->encap.tp_dst = dst_port ? nl_attr_get_be16(dst_port) : 0;
} else if (tun->t_action == TCA_TUNNEL_KEY_ACT_RELEASE) {
flower->tunnel.tunnel = true;
} else {
const struct nlattr *mirred_parms;
const struct tcf_t *tm;
struct nlattr *mirred_tm;
+ struct tc_action *action;
if (!nl_parse_nested(options, mirred_policy, mirred_attrs,
ARRAY_SIZE(mirred_policy))) {
mirred_parms = mirred_attrs[TCA_MIRRED_PARMS];
m = nl_attr_get_unspec(mirred_parms, sizeof *m);
- if (m->action != TC_ACT_STOLEN || m->eaction != TCA_EGRESS_REDIR) {
+ if (m->eaction != TCA_EGRESS_REDIR && m->eaction != TCA_EGRESS_MIRROR) {
VLOG_ERR_RL(&error_rl, "unknown mirred action: %d, %d, %d",
- m->action, m->eaction, m->ifindex);
+ m->action, m->eaction, m->ifindex);
return EINVAL;
}
- flower->ifindex_out = m->ifindex;
+ action = &flower->actions[flower->action_count++];
+ action->ifindex_out = m->ifindex;
+ action->type = TC_ACT_OUTPUT;
mirred_tm = mirred_attrs[TCA_MIRRED_TM];
tm = nl_attr_get_unspec(mirred_tm, sizeof *tm);
struct nlattr *vlan_attrs[ARRAY_SIZE(vlan_policy)];
const struct tc_vlan *v;
const struct nlattr *vlan_parms;
+ struct tc_action *action;
if (!nl_parse_nested(options, vlan_policy, vlan_attrs,
ARRAY_SIZE(vlan_policy))) {
return EPROTO;
}
+ action = &flower->actions[flower->action_count++];
vlan_parms = vlan_attrs[TCA_VLAN_PARMS];
v = nl_attr_get_unspec(vlan_parms, sizeof *v);
if (v->v_action == TCA_VLAN_ACT_PUSH) {
+ struct nlattr *vlan_tpid = vlan_attrs[TCA_VLAN_PUSH_VLAN_PROTOCOL];
struct nlattr *vlan_id = vlan_attrs[TCA_VLAN_PUSH_VLAN_ID];
struct nlattr *vlan_prio = vlan_attrs[TCA_VLAN_PUSH_VLAN_PRIORITY];
- flower->vlan_push_id = nl_attr_get_u16(vlan_id);
- flower->vlan_push_prio = vlan_prio ? nl_attr_get_u8(vlan_prio) : 0;
+ action->vlan.vlan_push_tpid = nl_attr_get_be16(vlan_tpid);
+ action->vlan.vlan_push_id = nl_attr_get_u16(vlan_id);
+ action->vlan.vlan_push_prio = vlan_prio ? nl_attr_get_u8(vlan_prio) : 0;
+ action->type = TC_ACT_VLAN_PUSH;
} else if (v->v_action == TCA_VLAN_ACT_POP) {
- flower->vlan_pop = 1;
+ action->type = TC_ACT_VLAN_POP;
} else {
VLOG_ERR_RL(&error_rl, "unknown vlan action: %d, %d",
v->action, v->v_action);
return 0;
}
+/* Netlink parse policy for a tc "csum" action: a mandatory
+ * TCA_CSUM_PARMS attribute carrying at least a struct tc_csum. */
+static const struct nl_policy csum_policy[] = {
+    [TCA_CSUM_PARMS] = { .type = NL_A_UNSPEC,
+                         .min_len = sizeof(struct tc_csum),
+                         .optional = false, },
+};
+
+/* Parses a tc "csum" action and cross-checks it against the state that
+ * parsing the pedit actions accumulated in 'flower': the kernel's csum
+ * update flags must equal flower->csum_update_flags, and when a full
+ * ip_proto match was required the flower mask must reflect it.
+ *
+ * Returns 0 on success, EPROTO on a malformed attribute, or EINVAL on a
+ * consistency failure.  On success, flower->csum_update_flags is zeroed
+ * to mark the csum action as handled. */
+static int
+nl_parse_act_csum(struct nlattr *options, struct tc_flower *flower)
+{
+    struct nlattr *csum_attrs[ARRAY_SIZE(csum_policy)];
+    const struct tc_csum *c;
+    const struct nlattr *csum_parms;
+
+    if (!nl_parse_nested(options, csum_policy, csum_attrs,
+                         ARRAY_SIZE(csum_policy))) {
+        VLOG_ERR_RL(&error_rl, "failed to parse csum action options");
+        return EPROTO;
+    }
+
+    csum_parms = csum_attrs[TCA_CSUM_PARMS];
+    c = nl_attr_get_unspec(csum_parms, sizeof *c);
+
+    /* sanity checks */
+    if (c->update_flags != flower->csum_update_flags) {
+        VLOG_WARN_RL(&error_rl,
+                     "expected different act csum flags: 0x%x != 0x%x",
+                     flower->csum_update_flags, c->update_flags);
+        return EINVAL;
+    }
+    flower->csum_update_flags = 0; /* so we know csum was handled */
+
+    if (flower->needs_full_ip_proto_mask
+        && flower->mask.ip_proto != UINT8_MAX) {
+        VLOG_WARN_RL(&error_rl, "expected full matching on flower ip_proto");
+        return EINVAL;
+    }
+
+    return 0;
+}
+
static const struct nl_policy act_policy[] = {
[TCA_ACT_KIND] = { .type = NL_A_STRING, .optional = false, },
[TCA_ACT_COOKIE] = { .type = NL_A_UNSPEC, .optional = true, },
struct nlattr *stats_attrs[ARRAY_SIZE(stats_policy)];
struct ovs_flow_stats *stats = &flower->stats;
const struct gnet_stats_basic *bs;
+ int err = 0;
if (!nl_parse_nested(action, act_policy, action_attrs,
ARRAY_SIZE(act_policy))) {
act_cookie = action_attrs[TCA_ACT_COOKIE];
if (!strcmp(act_kind, "gact")) {
- nl_parse_act_drop(act_options, flower);
+ err = nl_parse_act_drop(act_options, flower);
} else if (!strcmp(act_kind, "mirred")) {
- nl_parse_act_mirred(act_options, flower);
+ err = nl_parse_act_mirred(act_options, flower);
} else if (!strcmp(act_kind, "vlan")) {
- nl_parse_act_vlan(act_options, flower);
+ err = nl_parse_act_vlan(act_options, flower);
} else if (!strcmp(act_kind, "tunnel_key")) {
- nl_parse_act_tunnel_key(act_options, flower);
+ err = nl_parse_act_tunnel_key(act_options, flower);
} else if (!strcmp(act_kind, "pedit")) {
- nl_parse_act_pedit(act_options, flower);
+ err = nl_parse_act_pedit(act_options, flower);
} else if (!strcmp(act_kind, "csum")) {
- /* not doing anything for now, ovs has an implicit csum recalculation
- * with rewriting of packet headers (translating of pedit acts). */
+ nl_parse_act_csum(act_options, flower);
} else {
VLOG_ERR_RL(&error_rl, "unknown tc action kind: %s", act_kind);
- return EINVAL;
+ err = EINVAL;
+ }
+
+ if (err) {
+ return err;
}
if (act_cookie) {
for (int i = TCA_ACT_MIN_PRIO; i < max_size; i++) {
if (actions_orders[i]) {
- int err = nl_parse_single_action(actions_orders[i], flower);
+ int err;
+
+ if (flower->action_count >= TCA_ACT_MAX_PRIO) {
+ VLOG_DBG_RL(&error_rl, "Can only support %d actions", flower->action_count);
+ return EOPNOTSUPP;
+ }
+ err = nl_parse_single_action(actions_orders[i], flower);
if (err) {
return err;
}
}
+ if (flower->csum_update_flags) {
+ VLOG_WARN_RL(&error_rl,
+ "expected act csum with flags: 0x%x",
+ flower->csum_update_flags);
+ return EINVAL;
+ }
+
return 0;
}
nl_parse_flower_vlan(attrs, flower);
nl_parse_flower_ip(attrs, flower);
nl_parse_flower_tunnel(attrs, flower);
+ nl_parse_flower_flags(attrs, flower);
return nl_parse_flower_actions(attrs, flower);
}
kind = nl_attr_get_string(ta[TCA_KIND]);
if (strcmp(kind, "flower")) {
- VLOG_ERR_RL(&error_rl, "failed to parse filter: %s", kind);
+ VLOG_DBG_ONCE("Unsupported filter: %s", kind);
return EPROTO;
}
}
int
-tc_dump_flower_start(int ifindex, struct nl_dump *dump)
+tc_dump_flower_start(int ifindex, struct nl_dump *dump, uint32_t block_id)
{
struct ofpbuf request;
struct tcmsg *tcmsg;
+ int index;
- tcmsg = tc_make_request(ifindex, RTM_GETTFILTER, NLM_F_DUMP, &request);
- tcmsg->tcm_parent = TC_INGRESS_PARENT;
+ index = block_id ? TCM_IFINDEX_MAGIC_BLOCK : ifindex;
+ tcmsg = tc_make_request(index, RTM_GETTFILTER, NLM_F_DUMP, &request);
+ tcmsg->tcm_parent = block_id ? : TC_INGRESS_PARENT;
tcmsg->tcm_info = TC_H_UNSPEC;
tcmsg->tcm_handle = 0;
}
int
-tc_flush(int ifindex)
+tc_flush(int ifindex, uint32_t block_id)
{
struct ofpbuf request;
struct tcmsg *tcmsg;
+ int index;
- tcmsg = tc_make_request(ifindex, RTM_DELTFILTER, NLM_F_ACK, &request);
- tcmsg->tcm_parent = TC_INGRESS_PARENT;
+ index = block_id ? TCM_IFINDEX_MAGIC_BLOCK : ifindex;
+ tcmsg = tc_make_request(index, RTM_DELTFILTER, NLM_F_ACK, &request);
+ tcmsg->tcm_parent = block_id ? : TC_INGRESS_PARENT;
tcmsg->tcm_info = TC_H_UNSPEC;
return tc_transact(&request, NULL);
}
int
-tc_del_filter(int ifindex, int prio, int handle)
+tc_del_filter(int ifindex, int prio, int handle, uint32_t block_id)
{
struct ofpbuf request;
struct tcmsg *tcmsg;
struct ofpbuf *reply;
int error;
+ int index;
- tcmsg = tc_make_request(ifindex, RTM_DELTFILTER, NLM_F_ECHO, &request);
- tcmsg->tcm_parent = TC_INGRESS_PARENT;
+ index = block_id ? TCM_IFINDEX_MAGIC_BLOCK : ifindex;
+ tcmsg = tc_make_request(index, RTM_DELTFILTER, NLM_F_ECHO, &request);
+ tcmsg->tcm_parent = block_id ? : TC_INGRESS_PARENT;
tcmsg->tcm_info = tc_make_handle(prio, 0);
tcmsg->tcm_handle = handle;
}
int
-tc_get_flower(int ifindex, int prio, int handle, struct tc_flower *flower)
+tc_get_flower(int ifindex, int prio, int handle, struct tc_flower *flower,
+ uint32_t block_id)
{
struct ofpbuf request;
struct tcmsg *tcmsg;
struct ofpbuf *reply;
int error;
+ int index;
- tcmsg = tc_make_request(ifindex, RTM_GETTFILTER, NLM_F_ECHO, &request);
- tcmsg->tcm_parent = TC_INGRESS_PARENT;
+ index = block_id ? TCM_IFINDEX_MAGIC_BLOCK : ifindex;
+ tcmsg = tc_make_request(index, RTM_GETTFILTER, NLM_F_ECHO, &request);
+ tcmsg->tcm_parent = block_id ? : TC_INGRESS_PARENT;
tcmsg->tcm_info = tc_make_handle(prio, 0);
tcmsg->tcm_handle = handle;
nl_msg_put_act_pedit(struct ofpbuf *request, struct tc_pedit *parm,
struct tc_pedit_key_ex *ex)
{
- size_t ksize = sizeof *parm + (parm->nkeys * sizeof(struct tc_pedit_key));
+ size_t ksize = sizeof *parm + parm->nkeys * sizeof(struct tc_pedit_key);
size_t offset, offset_keys_ex, offset_key;
int i;
}
static void
-nl_msg_put_act_push_vlan(struct ofpbuf *request, uint16_t vid, uint8_t prio)
+nl_msg_put_act_push_vlan(struct ofpbuf *request, ovs_be16 tpid,
+ uint16_t vid, uint8_t prio)
{
size_t offset;
.v_action = TCA_VLAN_ACT_PUSH };
nl_msg_put_unspec(request, TCA_VLAN_PARMS, &parm, sizeof parm);
+ nl_msg_put_be16(request, TCA_VLAN_PUSH_VLAN_PROTOCOL, tpid);
nl_msg_put_u16(request, TCA_VLAN_PUSH_VLAN_ID, vid);
nl_msg_put_u8(request, TCA_VLAN_PUSH_VLAN_PRIORITY, prio);
}
}
static void
-nl_msg_put_act_redirect(struct ofpbuf *request, int ifindex)
+nl_msg_put_act_mirred(struct ofpbuf *request, int ifindex, int action,
+ int eaction)
{
size_t offset;
nl_msg_put_string(request, TCA_ACT_KIND, "mirred");
offset = nl_msg_start_nested(request, TCA_ACT_OPTIONS);
{
- struct tc_mirred m = { .action = TC_ACT_STOLEN,
- .eaction = TCA_EGRESS_REDIR,
+ struct tc_mirred m = { .action = action,
+ .eaction = eaction,
.ifindex = ifindex };
nl_msg_put_unspec(request, TCA_MIRRED_PARMS, &m, sizeof m);
*mask = (void *) (rewrite_mask + m->flower_offset - diff);
}
+/* Accumulates into flower->csum_update_flags the TCA_CSUM_UPDATE_FLAG_*
+ * bits implied by a pedit rewrite of a header of type 'htype', and marks
+ * when a full ip_proto match must be forced.  Returns 0 when the rewrite
+ * can be expressed, EOPNOTSUPP otherwise. */
-static inline void
+static inline int
csum_update_flag(struct tc_flower *flower,
enum pedit_header_type htype) {
-    if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4) {
+    /* Explicitly specify the csum flags so HW can return EOPNOTSUPP
+     * if it doesn't support a checksum recalculation of some headers.
+     * And since OVS allows a flow such as
+     * eth(dst=<mac>),eth_type(0x0800) actions=set(ipv4(src=<new_ip>))
+     * we need to force a more specific flow as this can, for example,
+     * need a recalculation of the ICMP checksum if the packet that
+     * passes is ICMPv6, and of the TCP checksum if it's TCP. */
+
+    switch (htype) {
+    case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
flower->csum_update_flags |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
-    }
-    if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4
-        || htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6
-        || htype == TCA_PEDIT_KEY_EX_HDR_TYPE_TCP
-        || htype == TCA_PEDIT_KEY_EX_HDR_TYPE_UDP) {
+        /* Fall through. */
+    case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+    case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+    case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
if (flower->key.ip_proto == IPPROTO_TCP) {
-            flower->mask.ip_proto = UINT8_MAX;
+            flower->needs_full_ip_proto_mask = true;
flower->csum_update_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
} else if (flower->key.ip_proto == IPPROTO_UDP) {
-            flower->mask.ip_proto = UINT8_MAX;
+            flower->needs_full_ip_proto_mask = true;
flower->csum_update_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
-        } else if (flower->key.ip_proto == IPPROTO_ICMP
-                   || flower->key.ip_proto == IPPROTO_ICMPV6) {
-            flower->mask.ip_proto = UINT8_MAX;
+        } else if (flower->key.ip_proto == IPPROTO_ICMP) {
+            /* NOTE(review): no csum flag is set for ICMPv4 -- presumably
+             * its checksum does not cover the rewritten fields; confirm. */
+            flower->needs_full_ip_proto_mask = true;
+        } else if (flower->key.ip_proto == IPPROTO_ICMPV6) {
+            flower->needs_full_ip_proto_mask = true;
flower->csum_update_flags |= TCA_CSUM_UPDATE_FLAG_ICMP;
+        } else {
+            VLOG_WARN_RL(&error_rl,
+                         "can't offload rewrite of IP/IPV6 with ip_proto: %d",
+                         flower->key.ip_proto);
+            break;
+        }
}
+        /* Fall through. */
+    case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+        return 0; /* success */
+
+    case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
+    case __PEDIT_HDR_TYPE_MAX:
+    default:
+        break;
+    }
+
+    return EOPNOTSUPP;
}
static int
.nkeys = 0
}
};
- int i, j;
+ int i, j, err;
for (i = 0; i < ARRAY_SIZE(flower_pedit_map); i++) {
struct flower_key_to_pedit *m = &flower_pedit_map[i];
pedit_key->mask = ~mask_word;
pedit_key->val = *data & mask_word;
sel.sel.nkeys++;
- csum_update_flag(flower, m->htype);
+
+ err = csum_update_flag(flower, m->htype);
+ if (err) {
+ return err;
+ }
+
+ if (flower->needs_full_ip_proto_mask) {
+ flower->mask.ip_proto = UINT8_MAX;
+ }
}
}
nl_msg_put_act_pedit(request, &sel.sel, sel.keys_ex);
{
size_t offset;
size_t act_offset;
+ uint16_t act_index = 1;
+ struct tc_action *action;
+ int i, ifindex = 0;
offset = nl_msg_start_nested(request, TCA_FLOWER_ACT);
{
- uint16_t act_index = 1;
int error;
- if (flower->rewrite.rewrite) {
- act_offset = nl_msg_start_nested(request, act_index++);
- error = nl_msg_put_flower_rewrite_pedits(request, flower);
- if (error) {
- return error;
- }
- nl_msg_end_nested(request, act_offset);
-
- act_offset = nl_msg_start_nested(request, act_index++);
- nl_msg_put_act_csum(request, flower->csum_update_flags);
- nl_msg_end_nested(request, act_offset);
- }
- if (flower->set.set) {
- act_offset = nl_msg_start_nested(request, act_index++);
- nl_msg_put_act_tunnel_key_set(request, flower->set.id,
- flower->set.ipv4.ipv4_src,
- flower->set.ipv4.ipv4_dst,
- &flower->set.ipv6.ipv6_src,
- &flower->set.ipv6.ipv6_dst,
- flower->set.tp_dst);
- nl_msg_end_nested(request, act_offset);
- }
if (flower->tunnel.tunnel) {
act_offset = nl_msg_start_nested(request, act_index++);
nl_msg_put_act_tunnel_key_release(request);
nl_msg_end_nested(request, act_offset);
}
- if (flower->vlan_pop) {
- act_offset = nl_msg_start_nested(request, act_index++);
- nl_msg_put_act_pop_vlan(request);
- nl_msg_end_nested(request, act_offset);
- }
- if (flower->vlan_push_id) {
- act_offset = nl_msg_start_nested(request, act_index++);
- nl_msg_put_act_push_vlan(request,
- flower->vlan_push_id,
- flower->vlan_push_prio);
- nl_msg_end_nested(request, act_offset);
- }
- if (flower->ifindex_out) {
- act_offset = nl_msg_start_nested(request, act_index++);
- nl_msg_put_act_redirect(request, flower->ifindex_out);
- nl_msg_put_act_cookie(request, &flower->act_cookie);
- nl_msg_end_nested(request, act_offset);
- } else {
- act_offset = nl_msg_start_nested(request, act_index++);
- nl_msg_put_act_drop(request);
- nl_msg_put_act_cookie(request, &flower->act_cookie);
- nl_msg_end_nested(request, act_offset);
+
+ action = flower->actions;
+ for (i = 0; i < flower->action_count; i++, action++) {
+ switch (action->type) {
+ case TC_ACT_PEDIT: {
+ act_offset = nl_msg_start_nested(request, act_index++);
+ error = nl_msg_put_flower_rewrite_pedits(request, flower);
+ if (error) {
+ return error;
+ }
+ nl_msg_end_nested(request, act_offset);
+
+ if (flower->csum_update_flags) {
+ act_offset = nl_msg_start_nested(request, act_index++);
+ nl_msg_put_act_csum(request, flower->csum_update_flags);
+ nl_msg_end_nested(request, act_offset);
+ }
+ }
+ break;
+ case TC_ACT_ENCAP: {
+ act_offset = nl_msg_start_nested(request, act_index++);
+ nl_msg_put_act_tunnel_key_set(request, action->encap.id,
+ action->encap.ipv4.ipv4_src,
+ action->encap.ipv4.ipv4_dst,
+ &action->encap.ipv6.ipv6_src,
+ &action->encap.ipv6.ipv6_dst,
+ action->encap.tp_dst);
+ nl_msg_end_nested(request, act_offset);
+ }
+ break;
+ case TC_ACT_VLAN_POP: {
+ act_offset = nl_msg_start_nested(request, act_index++);
+ nl_msg_put_act_pop_vlan(request);
+ nl_msg_end_nested(request, act_offset);
+ }
+ break;
+ case TC_ACT_VLAN_PUSH: {
+ act_offset = nl_msg_start_nested(request, act_index++);
+ nl_msg_put_act_push_vlan(request,
+ action->vlan.vlan_push_tpid,
+ action->vlan.vlan_push_id,
+ action->vlan.vlan_push_prio);
+ nl_msg_end_nested(request, act_offset);
+ }
+ break;
+ case TC_ACT_OUTPUT: {
+ ifindex = action->ifindex_out;
+ if (ifindex < 1) {
+ VLOG_ERR_RL(&error_rl, "%s: invalid ifindex: %d, type: %d",
+ __func__, ifindex, action->type);
+ return EINVAL;
+ }
+ act_offset = nl_msg_start_nested(request, act_index++);
+ if (i == flower->action_count - 1) {
+ nl_msg_put_act_mirred(request, ifindex, TC_ACT_STOLEN,
+ TCA_EGRESS_REDIR);
+ } else {
+ nl_msg_put_act_mirred(request, ifindex, TC_ACT_PIPE,
+ TCA_EGRESS_MIRROR);
+ }
+ nl_msg_put_act_cookie(request, &flower->act_cookie);
+ nl_msg_end_nested(request, act_offset);
+ }
+ break;
+ }
}
}
+ if (!ifindex) {
+ act_offset = nl_msg_start_nested(request, act_index++);
+ nl_msg_put_act_drop(request);
+ nl_msg_put_act_cookie(request, &flower->act_cookie);
+ nl_msg_end_nested(request, act_offset);
+ }
nl_msg_end_nested(request, offset);
return 0;
{
uint16_t host_eth_type = ntohs(flower->key.eth_type);
- bool is_vlan = (host_eth_type == ETH_TYPE_VLAN);
+ bool is_vlan = eth_type_vlan(flower->key.eth_type);
+ bool is_qinq = is_vlan && eth_type_vlan(flower->key.encap_eth_type[0]);
int err;
- /* need to parse acts first as some acts require changing the matching */
+ /* need to parse acts first as some acts require changing the matching
+ * see csum_update_flag() */
err = nl_msg_put_flower_acts(request, flower);
if (err) {
return err;
}
if (is_vlan) {
- host_eth_type = ntohs(flower->key.encap_eth_type);
+ if (is_qinq) {
+ host_eth_type = ntohs(flower->key.encap_eth_type[1]);
+ } else {
+ host_eth_type = ntohs(flower->key.encap_eth_type[0]);
+ }
}
FLOWER_PUT_MASKED_VALUE(dst_mac, TCA_FLOWER_KEY_ETH_DST);
FLOWER_PUT_MASKED_VALUE(src_mac, TCA_FLOWER_KEY_ETH_SRC);
if (host_eth_type == ETH_P_IP || host_eth_type == ETH_P_IPV6) {
+ FLOWER_PUT_MASKED_VALUE(ip_ttl, TCA_FLOWER_KEY_IP_TTL);
+ FLOWER_PUT_MASKED_VALUE(ip_tos, TCA_FLOWER_KEY_IP_TOS);
+
if (flower->mask.ip_proto && flower->key.ip_proto) {
nl_msg_put_u8(request, TCA_FLOWER_KEY_IP_PROTO,
flower->key.ip_proto);
}
+ if (flower->mask.flags) {
+ nl_msg_put_be32(request, TCA_FLOWER_KEY_FLAGS,
+ htonl(flower->key.flags));
+ nl_msg_put_be32(request, TCA_FLOWER_KEY_FLAGS_MASK,
+ htonl(flower->mask.flags));
+ }
+
if (flower->key.ip_proto == IPPROTO_UDP) {
FLOWER_PUT_MASKED_VALUE(udp_src, TCA_FLOWER_KEY_UDP_SRC);
FLOWER_PUT_MASKED_VALUE(udp_dst, TCA_FLOWER_KEY_UDP_DST);
if (host_eth_type == ETH_P_IP) {
FLOWER_PUT_MASKED_VALUE(ipv4.ipv4_src, TCA_FLOWER_KEY_IPV4_SRC);
FLOWER_PUT_MASKED_VALUE(ipv4.ipv4_dst, TCA_FLOWER_KEY_IPV4_DST);
- FLOWER_PUT_MASKED_VALUE(ip_ttl, TCA_FLOWER_KEY_IP_TTL);
} else if (host_eth_type == ETH_P_IPV6) {
FLOWER_PUT_MASKED_VALUE(ipv6.ipv6_src, TCA_FLOWER_KEY_IPV6_SRC);
FLOWER_PUT_MASKED_VALUE(ipv6.ipv6_dst, TCA_FLOWER_KEY_IPV6_DST);
nl_msg_put_be16(request, TCA_FLOWER_KEY_ETH_TYPE, flower->key.eth_type);
if (is_vlan) {
- if (flower->key.vlan_id || flower->key.vlan_prio) {
+ if (flower->key.vlan_id[0] || flower->key.vlan_prio[0]) {
nl_msg_put_u16(request, TCA_FLOWER_KEY_VLAN_ID,
- flower->key.vlan_id);
+ flower->key.vlan_id[0]);
nl_msg_put_u8(request, TCA_FLOWER_KEY_VLAN_PRIO,
- flower->key.vlan_prio);
+ flower->key.vlan_prio[0]);
}
- if (flower->key.encap_eth_type) {
+ if (flower->key.encap_eth_type[0]) {
nl_msg_put_be16(request, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
- flower->key.encap_eth_type);
+ flower->key.encap_eth_type[0]);
+ }
+
+ if (is_qinq) {
+ if (flower->key.vlan_id[1] || flower->key.vlan_prio[1]) {
+ nl_msg_put_u16(request, TCA_FLOWER_KEY_CVLAN_ID,
+ flower->key.vlan_id[1]);
+ nl_msg_put_u8(request, TCA_FLOWER_KEY_CVLAN_PRIO,
+ flower->key.vlan_prio[1]);
+ }
+ if (flower->key.encap_eth_type[1]) {
+ nl_msg_put_be16(request, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+ flower->key.encap_eth_type[1]);
+ }
}
}
int
tc_replace_flower(int ifindex, uint16_t prio, uint32_t handle,
- struct tc_flower *flower)
+ struct tc_flower *flower, uint32_t block_id)
{
struct ofpbuf request;
struct tcmsg *tcmsg;
int error = 0;
size_t basic_offset;
uint16_t eth_type = (OVS_FORCE uint16_t) flower->key.eth_type;
+ int index;
- tcmsg = tc_make_request(ifindex, RTM_NEWTFILTER,
- NLM_F_CREATE | NLM_F_ECHO, &request);
- tcmsg->tcm_parent = TC_INGRESS_PARENT;
+ index = block_id ? TCM_IFINDEX_MAGIC_BLOCK : ifindex;
+ tcmsg = tc_make_request(index, RTM_NEWTFILTER, NLM_F_CREATE | NLM_F_ECHO,
+ &request);
+ tcmsg->tcm_parent = block_id ? : TC_INGRESS_PARENT;
tcmsg->tcm_info = tc_make_handle(prio, eth_type);
tcmsg->tcm_handle = handle;