int err;
err = tc_del_filter(id);
- del_ufid_tc_mapping(ufid);
+ if (!err) {
+ del_ufid_tc_mapping(ufid);
+ }
return err;
}
static int
netdev_tc_flow_dump_create(struct netdev *netdev,
- struct netdev_flow_dump **dump_out)
+ struct netdev_flow_dump **dump_out,
+ bool terse)
{
enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
struct netdev_flow_dump *dump;
dump = xzalloc(sizeof *dump);
dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
dump->netdev = netdev_ref(netdev);
+ dump->terse = terse;
id = tc_make_tcf_id(ifindex, block_id, prio, hook);
- tc_dump_flower_start(&id, dump->nl_dump);
+ tc_dump_flower_start(&id, dump->nl_dump, terse);
*dump_out = dump;
match->wc.masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
}
+/* Copies the statistics of 'flower' into 'stats': packet and byte
+ * counters (read via get_32aligned_u64() because the tc_flower stats
+ * fields are only 32-bit aligned) and the last-used timestamp.
+ *
+ * 'stats' may be NULL, in which case this is a no-op; this lets callers
+ * that do not need statistics pass NULL unconditionally. */
+static void
+parse_tc_flower_to_stats(struct tc_flower *flower,
+ struct dpif_flow_stats *stats)
+{
+ if (!stats) {
+ return;
+ }
+
+ memset(stats, 0, sizeof *stats);
+ stats->n_packets = get_32aligned_u64(&flower->stats.n_packets);
+ stats->n_bytes = get_32aligned_u64(&flower->stats.n_bytes);
+ stats->used = flower->lastused;
+}
+
+/* Fills 'attrs' for a dumped flower rule.  The flow is reported as
+ * offloaded when tc reports the rule as in hardware, or when the
+ * offloaded state is undefined (treated optimistically as offloaded).
+ * The datapath layer for tc-managed flows is always "tc".
+ *
+ * NOTE(review): unlike parse_tc_flower_to_stats(), 'attrs' must be
+ * non-NULL here — callers appear to always supply it; confirm. */
+static void
+parse_tc_flower_to_attrs(struct tc_flower *flower,
+ struct dpif_flow_attrs *attrs)
+{
+ attrs->offloaded = (flower->offloaded_state == TC_OFFLOADED_STATE_IN_HW ||
+ flower->offloaded_state ==
+ TC_OFFLOADED_STATE_UNDEFINED);
+ attrs->dp_layer = "tc";
+ attrs->dp_extra_info = NULL;
+}
+
+/* Terse variant of parse_tc_flower_to_match(): used for terse flow
+ * dumps, where only statistics and attributes are wanted.  Initializes
+ * 'match' to a catch-all (no key/mask translation is performed) and
+ * fills 'stats' (NULL-tolerant) and 'attrs' from 'flower'.
+ *
+ * Always returns 0 (kept as int to mirror the non-terse parser's
+ * signature). */
+static int
+parse_tc_flower_terse_to_match(struct tc_flower *flower,
+ struct match *match,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs)
+{
+ match_init_catchall(match);
+
+ parse_tc_flower_to_stats(flower, stats);
+ parse_tc_flower_to_attrs(flower, attrs);
+
+ return 0;
+}
+
static int
parse_tc_flower_to_match(struct tc_flower *flower,
struct match *match,
struct nlattr **actions,
struct dpif_flow_stats *stats,
struct dpif_flow_attrs *attrs,
- struct ofpbuf *buf)
+ struct ofpbuf *buf,
+ bool terse)
{
size_t act_off;
struct tc_flower_key *key = &flower->key;
struct tc_action *action;
int i;
+ if (terse) {
+ return parse_tc_flower_terse_to_match(flower, match, stats, attrs);
+ }
+
ofpbuf_clear(buf);
match_init_catchall(match);
match->flow.mpls_lse[0] = key->mpls_lse & mask->mpls_lse;
match->wc.masks.mpls_lse[0] = mask->mpls_lse;
match_set_dl_type(match, key->encap_eth_type[0]);
+ } else if (key->eth_type == htons(ETH_TYPE_ARP)) {
+ match_set_arp_sha_masked(match, key->arp.sha, mask->arp.sha);
+ match_set_arp_tha_masked(match, key->arp.tha, mask->arp.tha);
+ match_set_arp_spa_masked(match, key->arp.spa, mask->arp.spa);
+ match_set_arp_tpa_masked(match, key->arp.tpa, mask->arp.tpa);
+ match_set_arp_opcode_masked(match, key->arp.opcode,
+ mask->arp.opcode);
+ match_set_dl_type(match, key->eth_type);
} else {
match_set_dl_type(match, key->eth_type);
}
match_set_tun_ttl_masked(match, flower->key.tunnel.ttl,
flower->mask.tunnel.ttl);
}
- if (flower->mask.tunnel.tp_dst) {
- match_set_tun_tp_dst_masked(match,
- flower->key.tunnel.tp_dst,
- flower->mask.tunnel.tp_dst);
- }
- if (flower->mask.tunnel.tp_src) {
- match_set_tun_tp_src_masked(match,
- flower->key.tunnel.tp_src,
- flower->mask.tunnel.tp_src);
+ if (flower->key.tunnel.tp_dst) {
+ match_set_tun_tp_dst(match, flower->key.tunnel.tp_dst);
}
if (flower->key.tunnel.metadata.present.len) {
flower_tun_opt_to_match(match, flower);
*actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));
- if (stats) {
- memset(stats, 0, sizeof *stats);
- stats->n_packets = get_32aligned_u64(&flower->stats.n_packets);
- stats->n_bytes = get_32aligned_u64(&flower->stats.n_bytes);
- stats->used = flower->lastused;
- }
-
- attrs->offloaded = (flower->offloaded_state == TC_OFFLOADED_STATE_IN_HW)
- || (flower->offloaded_state == TC_OFFLOADED_STATE_UNDEFINED);
- attrs->dp_layer = "tc";
- attrs->dp_extra_info = NULL;
+ parse_tc_flower_to_stats(flower, stats);
+ parse_tc_flower_to_attrs(flower, attrs);
return 0;
}
while (nl_dump_next(dump->nl_dump, &nl_flow, rbuffer)) {
struct tc_flower flower;
- if (parse_netlink_to_tc_flower(&nl_flow, &id, &flower)) {
+ if (parse_netlink_to_tc_flower(&nl_flow, &id, &flower, dump->terse)) {
continue;
}
if (parse_tc_flower_to_match(&flower, match, actions, stats, attrs,
- wbuffer)) {
+ wbuffer, dump->terse)) {
continue;
}
flower.mask.tunnel.ipv6.ipv6_dst = tnl_mask->ipv6_dst;
flower.mask.tunnel.tos = tnl_mask->ip_tos;
flower.mask.tunnel.ttl = tnl_mask->ip_ttl;
- flower.mask.tunnel.tp_src = tnl_mask->tp_src;
- flower.mask.tunnel.tp_dst = tnl_mask->tp_dst;
flower.mask.tunnel.id = (tnl->flags & FLOW_TNL_F_KEY) ? tnl_mask->tun_id : 0;
flower_match_to_tun_opt(&flower, tnl, tnl_mask);
flower.tunnel = true;
mask->dl_type = 0;
mask->in_port.odp_port = 0;
+ if (key->dl_type == htons(ETH_P_ARP)) {
+ flower.key.arp.spa = key->nw_src;
+ flower.key.arp.tpa = key->nw_dst;
+ flower.key.arp.sha = key->arp_sha;
+ flower.key.arp.tha = key->arp_tha;
+ flower.key.arp.opcode = key->nw_proto;
+ flower.mask.arp.spa = mask->nw_src;
+ flower.mask.arp.tpa = mask->nw_dst;
+ flower.mask.arp.sha = mask->arp_sha;
+ flower.mask.arp.tha = mask->arp_tha;
+ flower.mask.arp.opcode = mask->nw_proto;
+
+ mask->nw_src = 0;
+ mask->nw_dst = 0;
+ mask->nw_proto = 0;
+ memset(&mask->arp_sha, 0, sizeof mask->arp_sha);
+ memset(&mask->arp_tha, 0, sizeof mask->arp_tha);
+ }
+
if (is_ip_any(key)) {
flower.key.ip_proto = key->nw_proto;
flower.mask.ip_proto = mask->nw_proto;
action = &flower.actions[flower.action_count];
if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
odp_port_t port = nl_attr_get_odp_port(nla);
- struct netdev *outdev = netdev_ports_get(port, info->dpif_class);
+ struct netdev *outdev = netdev_ports_get(
+ port, netdev_get_dpif_type(netdev));
if (!outdev) {
VLOG_DBG_RL(&rl, "Can't find netdev for output port %d", port);
action->chain = nl_attr_get_u32(nla);
flower.action_count++;
recirc_act = true;
+ } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_DROP) {
+ action->type = TC_ACT_GOTO;
+ action->chain = 0; /* 0 is reserved and not used by recirc. */
+ flower.action_count++;
} else {
VLOG_DBG_RL(&rl, "unsupported put action type: %d",
nl_attr_type(nla));
}
in_port = netdev_ifindex_to_odp_port(id.ifindex);
- parse_tc_flower_to_match(&flower, match, actions, stats, attrs, buf);
+ parse_tc_flower_to_match(&flower, match, actions, stats, attrs, buf, false);
match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
match->flow.in_port.odp_port = in_port;
memset(&flower, 0, sizeof flower);
+ flower.tc_policy = TC_POLICY_SKIP_HW;
flower.key.eth_type = htons(ETH_P_IP);
flower.mask.eth_type = OVS_BE16_MAX;
memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac);
memset(&flower, 0, sizeof flower);
+ flower.tc_policy = TC_POLICY_SKIP_HW;
flower.key.eth_type = htons(ETH_P_IP);
flower.mask.eth_type = OVS_BE16_MAX;
memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac);
static int
netdev_tc_init_flow_api(struct netdev *netdev)
{
- static struct ovsthread_once multi_mask_once = OVSTHREAD_ONCE_INITIALIZER;
- static struct ovsthread_once block_once = OVSTHREAD_ONCE_INITIALIZER;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
uint32_t block_id = 0;
struct tcf_id id;
/* make sure there is no ingress/egress qdisc */
tc_add_del_qdisc(ifindex, false, 0, hook);
- if (ovsthread_once_start(&block_once)) {
+ if (ovsthread_once_start(&once)) {
probe_tc_block_support(ifindex);
/* Need to re-fetch block id as it depends on feature availability. */
block_id = get_block_id_from_netdev(netdev);
- ovsthread_once_done(&block_once);
- }
- if (ovsthread_once_start(&multi_mask_once)) {
probe_multi_mask_per_prio(ifindex);
- ovsthread_once_done(&multi_mask_once);
+ ovsthread_once_done(&once);
}
error = tc_add_del_qdisc(ifindex, true, block_id, hook);