lib/if-notifier.h \
lib/netdev-linux.c \
lib/netdev-linux.h \
- lib/netdev-tc-offloads.c \
+ lib/netdev-offload-tc.c \
lib/netlink-conntrack.c \
lib/netlink-conntrack.h \
lib/netlink-notifier.c \
lib_libopenvswitch_la_SOURCES += \
lib/dpdk.c \
lib/netdev-dpdk.c \
- lib/netdev-rte-offloads.c
+ lib/netdev-offload-dpdk.c
else
lib_libopenvswitch_la_SOURCES += \
lib/dpdk-stub.c
/* Finally, register the dpdk classes */
netdev_dpdk_register();
- netdev_register_flow_api_provider(&netdev_dpdk_offloads);
+ netdev_register_flow_api_provider(&netdev_offload_dpdk);
return true;
}
return is_dummy_class(netdev->netdev_class) ? 0 : EOPNOTSUPP;
}
-static const struct netdev_flow_api netdev_dummy_offloads = {
+static const struct netdev_flow_api netdev_offload_dummy = {
.type = "dummy",
.flow_put = netdev_dummy_flow_put,
.flow_del = netdev_dummy_flow_del,
netdev_register_provider(&dummy_internal_class);
netdev_register_provider(&dummy_pmd_class);
- netdev_register_flow_api_provider(&netdev_dummy_offloads);
+ netdev_register_flow_api_provider(&netdev_offload_dummy);
netdev_vport_tunnel_register();
}
--- /dev/null
+/*
+ * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
+ * Copyright (c) 2019 Mellanox Technologies, Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <config.h>
+
+#include <rte_flow.h>
+
+#include "cmap.h"
+#include "dpif-netdev.h"
+#include "netdev-offload-provider.h"
+#include "netdev-provider.h"
+#include "openvswitch/match.h"
+#include "openvswitch/vlog.h"
+#include "packets.h"
+#include "uuid.h"
+
+VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
+
+/* Thread-safety
+ * =============
+ *
+ * The API below is NOT thread-safe in the following terms:
+ *
+ * - The caller must be sure that none of these functions will be called
+ *   simultaneously, even for different 'netdev's.
+ *
+ * - The caller must be sure that 'netdev' will not be destroyed or
+ *   deallocated.
+ *
+ * - The caller must be sure that the 'netdev' configuration will not be
+ *   changed.  For example, a simultaneous call of 'netdev_reconfigure()'
+ *   for the same 'netdev' is forbidden.
+ *
+ * For the current implementation, all the above restrictions can be
+ * fulfilled by taking the datapath 'port_mutex' in lib/dpif-netdev.c. */
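+
+/* For illustration only (not part of this file): a caller such as
+ * lib/dpif-netdev.c is expected to serialize these calls, e.g. -- as a
+ * sketch, assuming the datapath 'port_mutex' mentioned above:
+ *
+ *     ovs_mutex_lock(&dp->port_mutex);
+ *     netdev_flow_put(netdev, &match, actions, actions_len,
+ *                     &ufid, &info, NULL);
+ *     ovs_mutex_unlock(&dp->port_mutex);
+ */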
+
+/*
+ * A mapping from ufid to dpdk rte_flow.
+ */
+static struct cmap ufid_to_rte_flow = CMAP_INITIALIZER;
+
+struct ufid_to_rte_flow_data {
+ struct cmap_node node;
+ ovs_u128 ufid;
+ struct rte_flow *rte_flow;
+};
+
+/* Find rte_flow with @ufid. */
+static struct rte_flow *
+ufid_to_rte_flow_find(const ovs_u128 *ufid)
+{
+ size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
+ struct ufid_to_rte_flow_data *data;
+
+ CMAP_FOR_EACH_WITH_HASH (data, node, hash, &ufid_to_rte_flow) {
+ if (ovs_u128_equals(*ufid, data->ufid)) {
+ return data->rte_flow;
+ }
+ }
+
+ return NULL;
+}
+
+static inline void
+ufid_to_rte_flow_associate(const ovs_u128 *ufid,
+ struct rte_flow *rte_flow)
+{
+ size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
+ struct ufid_to_rte_flow_data *data = xzalloc(sizeof *data);
+
+ /*
+ * We should not simply overwrite an existing rte flow.
+     * It should have been deleted first before being re-added.
+     * Thus, if the following assert triggers, something is wrong:
+     * the rte_flow has not been destroyed.
+ */
+ ovs_assert(ufid_to_rte_flow_find(ufid) == NULL);
+
+ data->ufid = *ufid;
+ data->rte_flow = rte_flow;
+
+ cmap_insert(&ufid_to_rte_flow,
+ CONST_CAST(struct cmap_node *, &data->node), hash);
+}
+
+static inline void
+ufid_to_rte_flow_disassociate(const ovs_u128 *ufid)
+{
+ size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
+ struct ufid_to_rte_flow_data *data;
+
+ CMAP_FOR_EACH_WITH_HASH (data, node, hash, &ufid_to_rte_flow) {
+ if (ovs_u128_equals(*ufid, data->ufid)) {
+ cmap_remove(&ufid_to_rte_flow,
+ CONST_CAST(struct cmap_node *, &data->node), hash);
+ ovsrcu_postpone(free, data);
+ return;
+ }
+ }
+
+ VLOG_WARN("ufid "UUID_FMT" is not associated with an rte flow\n",
+ UUID_ARGS((struct uuid *) ufid));
+}
+
+/*
+ * To avoid an individual xrealloc call for each new element, a
+ * 'current_max' field keeps track of the number of elements currently
+ * allocated.  It starts at 8 and doubles on each xrealloc call
+ * (8, 16, 32, ...).  The same scheme is used for both the patterns and
+ * the actions below.
+ */
+struct flow_patterns {
+ struct rte_flow_item *items;
+ int cnt;
+ int current_max;
+};
+
+struct flow_actions {
+ struct rte_flow_action *actions;
+ int cnt;
+ int current_max;
+};
+
+static void
+dump_flow_pattern(struct rte_flow_item *item)
+{
+ struct ds s;
+
+ if (!VLOG_IS_DBG_ENABLED() || item->type == RTE_FLOW_ITEM_TYPE_END) {
+ return;
+ }
+
+ ds_init(&s);
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ const struct rte_flow_item_eth *eth_spec = item->spec;
+ const struct rte_flow_item_eth *eth_mask = item->mask;
+
+ ds_put_cstr(&s, "rte flow eth pattern:\n");
+ if (eth_spec) {
+ ds_put_format(&s,
+ " Spec: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
+ "type=0x%04" PRIx16"\n",
+ ETH_ADDR_BYTES_ARGS(eth_spec->src.addr_bytes),
+ ETH_ADDR_BYTES_ARGS(eth_spec->dst.addr_bytes),
+ ntohs(eth_spec->type));
+ } else {
+ ds_put_cstr(&s, " Spec = null\n");
+ }
+ if (eth_mask) {
+ ds_put_format(&s,
+ " Mask: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
+ "type=0x%04"PRIx16"\n",
+ ETH_ADDR_BYTES_ARGS(eth_mask->src.addr_bytes),
+ ETH_ADDR_BYTES_ARGS(eth_mask->dst.addr_bytes),
+ ntohs(eth_mask->type));
+ } else {
+ ds_put_cstr(&s, " Mask = null\n");
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ const struct rte_flow_item_vlan *vlan_spec = item->spec;
+ const struct rte_flow_item_vlan *vlan_mask = item->mask;
+
+ ds_put_cstr(&s, "rte flow vlan pattern:\n");
+ if (vlan_spec) {
+ ds_put_format(&s,
+ " Spec: inner_type=0x%"PRIx16", tci=0x%"PRIx16"\n",
+ ntohs(vlan_spec->inner_type), ntohs(vlan_spec->tci));
+ } else {
+ ds_put_cstr(&s, " Spec = null\n");
+ }
+
+ if (vlan_mask) {
+ ds_put_format(&s,
+ " Mask: inner_type=0x%"PRIx16", tci=0x%"PRIx16"\n",
+ ntohs(vlan_mask->inner_type), ntohs(vlan_mask->tci));
+ } else {
+ ds_put_cstr(&s, " Mask = null\n");
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
+
+ ds_put_cstr(&s, "rte flow ipv4 pattern:\n");
+ if (ipv4_spec) {
+ ds_put_format(&s,
+ " Spec: tos=0x%"PRIx8", ttl=%"PRIx8
+ ", proto=0x%"PRIx8
+ ", src="IP_FMT", dst="IP_FMT"\n",
+ ipv4_spec->hdr.type_of_service,
+ ipv4_spec->hdr.time_to_live,
+ ipv4_spec->hdr.next_proto_id,
+ IP_ARGS(ipv4_spec->hdr.src_addr),
+ IP_ARGS(ipv4_spec->hdr.dst_addr));
+ } else {
+ ds_put_cstr(&s, " Spec = null\n");
+ }
+ if (ipv4_mask) {
+ ds_put_format(&s,
+ " Mask: tos=0x%"PRIx8", ttl=%"PRIx8
+ ", proto=0x%"PRIx8
+ ", src="IP_FMT", dst="IP_FMT"\n",
+ ipv4_mask->hdr.type_of_service,
+ ipv4_mask->hdr.time_to_live,
+ ipv4_mask->hdr.next_proto_id,
+ IP_ARGS(ipv4_mask->hdr.src_addr),
+ IP_ARGS(ipv4_mask->hdr.dst_addr));
+ } else {
+ ds_put_cstr(&s, " Mask = null\n");
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ const struct rte_flow_item_udp *udp_spec = item->spec;
+ const struct rte_flow_item_udp *udp_mask = item->mask;
+
+ ds_put_cstr(&s, "rte flow udp pattern:\n");
+ if (udp_spec) {
+ ds_put_format(&s,
+ " Spec: src_port=%"PRIu16", dst_port=%"PRIu16"\n",
+ ntohs(udp_spec->hdr.src_port),
+ ntohs(udp_spec->hdr.dst_port));
+ } else {
+ ds_put_cstr(&s, " Spec = null\n");
+ }
+ if (udp_mask) {
+ ds_put_format(&s,
+ " Mask: src_port=0x%"PRIx16
+ ", dst_port=0x%"PRIx16"\n",
+ ntohs(udp_mask->hdr.src_port),
+ ntohs(udp_mask->hdr.dst_port));
+ } else {
+ ds_put_cstr(&s, " Mask = null\n");
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ const struct rte_flow_item_sctp *sctp_spec = item->spec;
+ const struct rte_flow_item_sctp *sctp_mask = item->mask;
+
+ ds_put_cstr(&s, "rte flow sctp pattern:\n");
+ if (sctp_spec) {
+ ds_put_format(&s,
+ " Spec: src_port=%"PRIu16", dst_port=%"PRIu16"\n",
+ ntohs(sctp_spec->hdr.src_port),
+ ntohs(sctp_spec->hdr.dst_port));
+ } else {
+ ds_put_cstr(&s, " Spec = null\n");
+ }
+ if (sctp_mask) {
+ ds_put_format(&s,
+ " Mask: src_port=0x%"PRIx16
+ ", dst_port=0x%"PRIx16"\n",
+ ntohs(sctp_mask->hdr.src_port),
+ ntohs(sctp_mask->hdr.dst_port));
+ } else {
+ ds_put_cstr(&s, " Mask = null\n");
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
+ const struct rte_flow_item_icmp *icmp_spec = item->spec;
+ const struct rte_flow_item_icmp *icmp_mask = item->mask;
+
+ ds_put_cstr(&s, "rte flow icmp pattern:\n");
+ if (icmp_spec) {
+ ds_put_format(&s,
+ " Spec: icmp_type=%"PRIu8", icmp_code=%"PRIu8"\n",
+ icmp_spec->hdr.icmp_type,
+ icmp_spec->hdr.icmp_code);
+ } else {
+ ds_put_cstr(&s, " Spec = null\n");
+ }
+ if (icmp_mask) {
+ ds_put_format(&s,
+ " Mask: icmp_type=0x%"PRIx8
+ ", icmp_code=0x%"PRIx8"\n",
+                          icmp_mask->hdr.icmp_type,
+                          icmp_mask->hdr.icmp_code);
+ } else {
+ ds_put_cstr(&s, " Mask = null\n");
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ const struct rte_flow_item_tcp *tcp_spec = item->spec;
+ const struct rte_flow_item_tcp *tcp_mask = item->mask;
+
+ ds_put_cstr(&s, "rte flow tcp pattern:\n");
+ if (tcp_spec) {
+ ds_put_format(&s,
+ " Spec: src_port=%"PRIu16", dst_port=%"PRIu16
+ ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8"\n",
+ ntohs(tcp_spec->hdr.src_port),
+ ntohs(tcp_spec->hdr.dst_port),
+ tcp_spec->hdr.data_off,
+ tcp_spec->hdr.tcp_flags);
+ } else {
+ ds_put_cstr(&s, " Spec = null\n");
+ }
+ if (tcp_mask) {
+ ds_put_format(&s,
+ " Mask: src_port=%"PRIx16", dst_port=%"PRIx16
+ ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8"\n",
+ ntohs(tcp_mask->hdr.src_port),
+ ntohs(tcp_mask->hdr.dst_port),
+ tcp_mask->hdr.data_off,
+ tcp_mask->hdr.tcp_flags);
+ } else {
+ ds_put_cstr(&s, " Mask = null\n");
+ }
+ }
+
+ VLOG_DBG("%s", ds_cstr(&s));
+ ds_destroy(&s);
+}
+
+static void
+add_flow_pattern(struct flow_patterns *patterns, enum rte_flow_item_type type,
+ const void *spec, const void *mask)
+{
+ int cnt = patterns->cnt;
+
+ if (cnt == 0) {
+ patterns->current_max = 8;
+ patterns->items = xcalloc(patterns->current_max,
+ sizeof *patterns->items);
+ } else if (cnt == patterns->current_max) {
+ patterns->current_max *= 2;
+ patterns->items = xrealloc(patterns->items, patterns->current_max *
+ sizeof *patterns->items);
+ }
+
+ patterns->items[cnt].type = type;
+ patterns->items[cnt].spec = spec;
+ patterns->items[cnt].mask = mask;
+ patterns->items[cnt].last = NULL;
+ dump_flow_pattern(&patterns->items[cnt]);
+ patterns->cnt++;
+}
+
+static void
+add_flow_action(struct flow_actions *actions, enum rte_flow_action_type type,
+ const void *conf)
+{
+ int cnt = actions->cnt;
+
+ if (cnt == 0) {
+ actions->current_max = 8;
+ actions->actions = xcalloc(actions->current_max,
+ sizeof *actions->actions);
+ } else if (cnt == actions->current_max) {
+ actions->current_max *= 2;
+ actions->actions = xrealloc(actions->actions, actions->current_max *
+ sizeof *actions->actions);
+ }
+
+ actions->actions[cnt].type = type;
+ actions->actions[cnt].conf = conf;
+ actions->cnt++;
+}
+
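+/* RSS action configuration followed inline by the queue list that
+ * 'conf.queue' points at; 'queue' is sized to netdev_n_rxq() entries
+ * by add_flow_rss_action() below. */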
+struct action_rss_data {
+ struct rte_flow_action_rss conf;
+ uint16_t queue[0];
+};
+
+static struct action_rss_data *
+add_flow_rss_action(struct flow_actions *actions,
+ struct netdev *netdev)
+{
+ int i;
+ struct action_rss_data *rss_data;
+
+ rss_data = xmalloc(sizeof *rss_data +
+ netdev_n_rxq(netdev) * sizeof rss_data->queue[0]);
+ *rss_data = (struct action_rss_data) {
+ .conf = (struct rte_flow_action_rss) {
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = 0,
+ .queue_num = netdev_n_rxq(netdev),
+ .queue = rss_data->queue,
+ .key_len = 0,
+ .key = NULL
+ },
+ };
+
+    /* Populate the queue array with all rxq indexes (the default). */
+ for (i = 0; i < netdev_n_rxq(netdev); i++) {
+ rss_data->queue[i] = i;
+ }
+
+ add_flow_action(actions, RTE_FLOW_ACTION_TYPE_RSS, &rss_data->conf);
+
+ return rss_data;
+}
+
+static int
+netdev_offload_dpdk_add_flow(struct netdev *netdev,
+ const struct match *match,
+ struct nlattr *nl_actions OVS_UNUSED,
+ size_t actions_len OVS_UNUSED,
+ const ovs_u128 *ufid,
+ struct offload_info *info)
+{
+ const struct rte_flow_attr flow_attr = {
+ .group = 0,
+ .priority = 0,
+ .ingress = 1,
+ .egress = 0
+ };
+ struct flow_patterns patterns = { .items = NULL, .cnt = 0 };
+ struct flow_actions actions = { .actions = NULL, .cnt = 0 };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+ uint8_t proto = 0;
+ int ret = 0;
+ struct flow_items {
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ union {
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_sctp sctp;
+ struct rte_flow_item_icmp icmp;
+ };
+ } spec, mask;
+
+ memset(&spec, 0, sizeof spec);
+ memset(&mask, 0, sizeof mask);
+
+ /* Eth */
+ if (!eth_addr_is_zero(match->wc.masks.dl_src) ||
+ !eth_addr_is_zero(match->wc.masks.dl_dst)) {
+ memcpy(&spec.eth.dst, &match->flow.dl_dst, sizeof spec.eth.dst);
+ memcpy(&spec.eth.src, &match->flow.dl_src, sizeof spec.eth.src);
+ spec.eth.type = match->flow.dl_type;
+
+ memcpy(&mask.eth.dst, &match->wc.masks.dl_dst, sizeof mask.eth.dst);
+ memcpy(&mask.eth.src, &match->wc.masks.dl_src, sizeof mask.eth.src);
+ mask.eth.type = match->wc.masks.dl_type;
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ETH,
+ &spec.eth, &mask.eth);
+ } else {
+        /*
+         * If the user specifies a flow (like a UDP flow) without L2
+         * patterns, OVS will at least set the dl_type.  Normally, it is
+         * enough to create an eth pattern with just that.  Unfortunately,
+         * some Intel NICs (such as the XL710) do not support it.  The
+         * workaround below simply matches any L2 packets.
+         */
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ETH, NULL, NULL);
+ }
+
+ /* VLAN */
+ if (match->wc.masks.vlans[0].tci && match->flow.vlans[0].tci) {
+ spec.vlan.tci = match->flow.vlans[0].tci & ~htons(VLAN_CFI);
+ mask.vlan.tci = match->wc.masks.vlans[0].tci & ~htons(VLAN_CFI);
+
+        /* Match any protocol. */
+ mask.vlan.inner_type = 0;
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_VLAN,
+ &spec.vlan, &mask.vlan);
+ }
+
+    /* IPv4 */
+ if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
+ spec.ipv4.hdr.type_of_service = match->flow.nw_tos;
+ spec.ipv4.hdr.time_to_live = match->flow.nw_ttl;
+ spec.ipv4.hdr.next_proto_id = match->flow.nw_proto;
+ spec.ipv4.hdr.src_addr = match->flow.nw_src;
+ spec.ipv4.hdr.dst_addr = match->flow.nw_dst;
+
+ mask.ipv4.hdr.type_of_service = match->wc.masks.nw_tos;
+ mask.ipv4.hdr.time_to_live = match->wc.masks.nw_ttl;
+ mask.ipv4.hdr.next_proto_id = match->wc.masks.nw_proto;
+ mask.ipv4.hdr.src_addr = match->wc.masks.nw_src;
+ mask.ipv4.hdr.dst_addr = match->wc.masks.nw_dst;
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_IPV4,
+ &spec.ipv4, &mask.ipv4);
+
+ /* Save proto for L4 protocol setup. */
+ proto = spec.ipv4.hdr.next_proto_id &
+ mask.ipv4.hdr.next_proto_id;
+ }
+
+ if (proto != IPPROTO_ICMP && proto != IPPROTO_UDP &&
+ proto != IPPROTO_SCTP && proto != IPPROTO_TCP &&
+ (match->wc.masks.tp_src ||
+ match->wc.masks.tp_dst ||
+ match->wc.masks.tcp_flags)) {
+ VLOG_DBG("L4 Protocol (%u) not supported", proto);
+ ret = -1;
+ goto out;
+ }
+
+ if ((match->wc.masks.tp_src && match->wc.masks.tp_src != OVS_BE16_MAX) ||
+ (match->wc.masks.tp_dst && match->wc.masks.tp_dst != OVS_BE16_MAX)) {
+ ret = -1;
+ goto out;
+ }
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ spec.tcp.hdr.src_port = match->flow.tp_src;
+ spec.tcp.hdr.dst_port = match->flow.tp_dst;
+ spec.tcp.hdr.data_off = ntohs(match->flow.tcp_flags) >> 8;
+ spec.tcp.hdr.tcp_flags = ntohs(match->flow.tcp_flags) & 0xff;
+
+ mask.tcp.hdr.src_port = match->wc.masks.tp_src;
+ mask.tcp.hdr.dst_port = match->wc.masks.tp_dst;
+ mask.tcp.hdr.data_off = ntohs(match->wc.masks.tcp_flags) >> 8;
+ mask.tcp.hdr.tcp_flags = ntohs(match->wc.masks.tcp_flags) & 0xff;
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_TCP,
+ &spec.tcp, &mask.tcp);
+
+ /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match. */
+ mask.ipv4.hdr.next_proto_id = 0;
+ break;
+
+ case IPPROTO_UDP:
+ spec.udp.hdr.src_port = match->flow.tp_src;
+ spec.udp.hdr.dst_port = match->flow.tp_dst;
+
+ mask.udp.hdr.src_port = match->wc.masks.tp_src;
+ mask.udp.hdr.dst_port = match->wc.masks.tp_dst;
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_UDP,
+ &spec.udp, &mask.udp);
+
+ /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match. */
+ mask.ipv4.hdr.next_proto_id = 0;
+ break;
+
+ case IPPROTO_SCTP:
+ spec.sctp.hdr.src_port = match->flow.tp_src;
+ spec.sctp.hdr.dst_port = match->flow.tp_dst;
+
+ mask.sctp.hdr.src_port = match->wc.masks.tp_src;
+ mask.sctp.hdr.dst_port = match->wc.masks.tp_dst;
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_SCTP,
+ &spec.sctp, &mask.sctp);
+
+ /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match. */
+ mask.ipv4.hdr.next_proto_id = 0;
+ break;
+
+ case IPPROTO_ICMP:
+ spec.icmp.hdr.icmp_type = (uint8_t) ntohs(match->flow.tp_src);
+ spec.icmp.hdr.icmp_code = (uint8_t) ntohs(match->flow.tp_dst);
+
+ mask.icmp.hdr.icmp_type = (uint8_t) ntohs(match->wc.masks.tp_src);
+ mask.icmp.hdr.icmp_code = (uint8_t) ntohs(match->wc.masks.tp_dst);
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ICMP,
+ &spec.icmp, &mask.icmp);
+
+ /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match. */
+ mask.ipv4.hdr.next_proto_id = 0;
+ break;
+ }
+
+ add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_END, NULL, NULL);
+
+ struct rte_flow_action_mark mark;
+ struct action_rss_data *rss;
+
+ mark.id = info->flow_mark;
+ add_flow_action(&actions, RTE_FLOW_ACTION_TYPE_MARK, &mark);
+
+ rss = add_flow_rss_action(&actions, netdev);
+ add_flow_action(&actions, RTE_FLOW_ACTION_TYPE_END, NULL);
+
+ flow = netdev_dpdk_rte_flow_create(netdev, &flow_attr,
+ patterns.items,
+ actions.actions, &error);
+
+ free(rss);
+ if (!flow) {
+ VLOG_ERR("%s: rte flow creat error: %u : message : %s\n",
+ netdev_get_name(netdev), error.type, error.message);
+ ret = -1;
+ goto out;
+ }
+ ufid_to_rte_flow_associate(ufid, flow);
+ VLOG_DBG("%s: installed flow %p by ufid "UUID_FMT"\n",
+ netdev_get_name(netdev), flow, UUID_ARGS((struct uuid *)ufid));
+
+out:
+ free(patterns.items);
+ free(actions.actions);
+ return ret;
+}
+
+/*
+ * Check if any unsupported flow patterns are specified.
+ */
+static int
+netdev_offload_dpdk_validate_flow(const struct match *match)
+{
+ struct match match_zero_wc;
+ const struct flow *masks = &match->wc.masks;
+
+ /* Create a wc-zeroed version of flow. */
+ match_init(&match_zero_wc, &match->flow, &match->wc);
+
+ if (!is_all_zeros(&match_zero_wc.flow.tunnel,
+ sizeof match_zero_wc.flow.tunnel)) {
+ goto err;
+ }
+
+ if (masks->metadata || masks->skb_priority ||
+ masks->pkt_mark || masks->dp_hash) {
+ goto err;
+ }
+
+    /* recirc_id must be zero. */
+ if (match_zero_wc.flow.recirc_id) {
+ goto err;
+ }
+
+ if (masks->ct_state || masks->ct_nw_proto ||
+ masks->ct_zone || masks->ct_mark ||
+ !ovs_u128_is_zero(masks->ct_label)) {
+ goto err;
+ }
+
+ if (masks->conj_id || masks->actset_output) {
+ goto err;
+ }
+
+ /* Unsupported L2. */
+ if (!is_all_zeros(masks->mpls_lse, sizeof masks->mpls_lse)) {
+ goto err;
+ }
+
+ /* Unsupported L3. */
+ if (masks->ipv6_label || masks->ct_nw_src || masks->ct_nw_dst ||
+ !is_all_zeros(&masks->ipv6_src, sizeof masks->ipv6_src) ||
+ !is_all_zeros(&masks->ipv6_dst, sizeof masks->ipv6_dst) ||
+ !is_all_zeros(&masks->ct_ipv6_src, sizeof masks->ct_ipv6_src) ||
+ !is_all_zeros(&masks->ct_ipv6_dst, sizeof masks->ct_ipv6_dst) ||
+ !is_all_zeros(&masks->nd_target, sizeof masks->nd_target) ||
+ !is_all_zeros(&masks->nsh, sizeof masks->nsh) ||
+ !is_all_zeros(&masks->arp_sha, sizeof masks->arp_sha) ||
+ !is_all_zeros(&masks->arp_tha, sizeof masks->arp_tha)) {
+ goto err;
+ }
+
+ /* If fragmented, then don't HW accelerate - for now. */
+ if (match_zero_wc.flow.nw_frag) {
+ goto err;
+ }
+
+ /* Unsupported L4. */
+ if (masks->igmp_group_ip4 || masks->ct_tp_src || masks->ct_tp_dst) {
+ goto err;
+ }
+
+ return 0;
+
+err:
+ VLOG_ERR("cannot HW accelerate this flow due to unsupported protocols");
+ return -1;
+}
+
+static int
+netdev_offload_dpdk_destroy_flow(struct netdev *netdev,
+ const ovs_u128 *ufid,
+ struct rte_flow *rte_flow)
+{
+ struct rte_flow_error error;
+ int ret = netdev_dpdk_rte_flow_destroy(netdev, rte_flow, &error);
+
+ if (ret == 0) {
+ ufid_to_rte_flow_disassociate(ufid);
+ VLOG_DBG("%s: removed rte flow %p associated with ufid " UUID_FMT "\n",
+ netdev_get_name(netdev), rte_flow,
+ UUID_ARGS((struct uuid *)ufid));
+ } else {
+ VLOG_ERR("%s: rte flow destroy error: %u : message : %s\n",
+ netdev_get_name(netdev), error.type, error.message);
+ }
+
+ return ret;
+}
+
+static int
+netdev_offload_dpdk_flow_put(struct netdev *netdev, struct match *match,
+ struct nlattr *actions, size_t actions_len,
+ const ovs_u128 *ufid, struct offload_info *info,
+ struct dpif_flow_stats *stats OVS_UNUSED)
+{
+ struct rte_flow *rte_flow;
+ int ret;
+
+    /*
+     * If an old rte_flow exists, this is a flow modification.
+     * Destroy the old rte flow first, then add the new one.
+     */
+ rte_flow = ufid_to_rte_flow_find(ufid);
+ if (rte_flow) {
+ ret = netdev_offload_dpdk_destroy_flow(netdev, ufid, rte_flow);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ ret = netdev_offload_dpdk_validate_flow(match);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return netdev_offload_dpdk_add_flow(netdev, match, actions,
+ actions_len, ufid, info);
+}
+
+static int
+netdev_offload_dpdk_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
+ struct dpif_flow_stats *stats OVS_UNUSED)
+{
+ struct rte_flow *rte_flow = ufid_to_rte_flow_find(ufid);
+
+ if (!rte_flow) {
+ return -1;
+ }
+
+ return netdev_offload_dpdk_destroy_flow(netdev, ufid, rte_flow);
+}
+
+static int
+netdev_offload_dpdk_init_flow_api(struct netdev *netdev)
+{
+ return netdev_dpdk_flow_api_supported(netdev) ? 0 : EOPNOTSUPP;
+}
+
+const struct netdev_flow_api netdev_offload_dpdk = {
+ .type = "dpdk_flow_api",
+ .flow_put = netdev_offload_dpdk_flow_put,
+ .flow_del = netdev_offload_dpdk_flow_del,
+ .init_flow_api = netdev_offload_dpdk_init_flow_api,
+};
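+
+/* This provider is registered from the DPDK initialization path via
+ * netdev_register_flow_api_provider(&netdev_offload_dpdk), next to
+ * netdev_dpdk_register(). */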
int netdev_unregister_flow_api_provider(const char *type);
#ifdef __linux__
-extern const struct netdev_flow_api netdev_tc_offloads;
+extern const struct netdev_flow_api netdev_offload_tc;
#endif
#ifdef DPDK_NETDEV
-extern const struct netdev_flow_api netdev_dpdk_offloads;
+extern const struct netdev_flow_api netdev_offload_dpdk;
#endif
#ifdef __cplusplus
--- /dev/null
+/*
+ * Copyright (c) 2016 Mellanox Technologies, Ltd.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include <errno.h>
+#include <linux/if_ether.h>
+
+#include "dpif.h"
+#include "hash.h"
+#include "openvswitch/hmap.h"
+#include "openvswitch/match.h"
+#include "openvswitch/ofpbuf.h"
+#include "openvswitch/thread.h"
+#include "openvswitch/types.h"
+#include "openvswitch/util.h"
+#include "openvswitch/vlog.h"
+#include "netdev-linux.h"
+#include "netdev-offload-provider.h"
+#include "netdev-provider.h"
+#include "netlink.h"
+#include "netlink-socket.h"
+#include "odp-netlink.h"
+#include "odp-util.h"
+#include "tc.h"
+#include "unaligned.h"
+#include "util.h"
+
+VLOG_DEFINE_THIS_MODULE(netdev_offload_tc);
+
+static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
+
+static struct hmap ufid_tc = HMAP_INITIALIZER(&ufid_tc);
+static bool multi_mask_per_prio = false;
+static bool block_support = false;
+
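+/* A field copied between an ovs_key_* struct and struct tc_flower_key:
+ * 'offset' is within the ovs key struct, 'flower_offset' within the
+ * flower key, and 'size' is the field width in bytes. */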
+struct netlink_field {
+ int offset;
+ int flower_offset;
+ int size;
+};
+
+static bool
+is_internal_port(const char *type)
+{
+ return !strcmp(type, "internal");
+}
+
+static enum tc_qdisc_hook
+get_tc_qdisc_hook(struct netdev *netdev)
+{
+ return is_internal_port(netdev_get_type(netdev)) ? TC_EGRESS : TC_INGRESS;
+}
+
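+/* Maps OVS_KEY_ATTR_* attributes of set() actions to the corresponding
+ * rewrite fields of struct tc_flower_key.  Each row holds up to four
+ * entries; a zero-initialized entry (size == 0) terminates a row, which
+ * the consumers below test with '!f->size'. */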
+static struct netlink_field set_flower_map[][4] = {
+ [OVS_KEY_ATTR_IPV4] = {
+ { offsetof(struct ovs_key_ipv4, ipv4_src),
+ offsetof(struct tc_flower_key, ipv4.ipv4_src),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv4.ipv4_src)
+ },
+ { offsetof(struct ovs_key_ipv4, ipv4_dst),
+ offsetof(struct tc_flower_key, ipv4.ipv4_dst),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv4.ipv4_dst)
+ },
+ { offsetof(struct ovs_key_ipv4, ipv4_ttl),
+ offsetof(struct tc_flower_key, ipv4.rewrite_ttl),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv4.rewrite_ttl)
+ },
+ { offsetof(struct ovs_key_ipv4, ipv4_tos),
+ offsetof(struct tc_flower_key, ipv4.rewrite_tos),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv4.rewrite_tos)
+ },
+ },
+ [OVS_KEY_ATTR_IPV6] = {
+ { offsetof(struct ovs_key_ipv6, ipv6_src),
+ offsetof(struct tc_flower_key, ipv6.ipv6_src),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv6.ipv6_src)
+ },
+ { offsetof(struct ovs_key_ipv6, ipv6_dst),
+ offsetof(struct tc_flower_key, ipv6.ipv6_dst),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv6.ipv6_dst)
+ },
+ { offsetof(struct ovs_key_ipv6, ipv6_hlimit),
+ offsetof(struct tc_flower_key, ipv6.rewrite_hlimit),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv6.rewrite_hlimit)
+ },
+ { offsetof(struct ovs_key_ipv6, ipv6_tclass),
+ offsetof(struct tc_flower_key, ipv6.rewrite_tclass),
+ MEMBER_SIZEOF(struct tc_flower_key, ipv6.rewrite_tclass)
+ },
+ },
+ [OVS_KEY_ATTR_ETHERNET] = {
+ { offsetof(struct ovs_key_ethernet, eth_src),
+ offsetof(struct tc_flower_key, src_mac),
+ MEMBER_SIZEOF(struct tc_flower_key, src_mac)
+ },
+ { offsetof(struct ovs_key_ethernet, eth_dst),
+ offsetof(struct tc_flower_key, dst_mac),
+ MEMBER_SIZEOF(struct tc_flower_key, dst_mac)
+ },
+ },
+ [OVS_KEY_ATTR_ETHERTYPE] = {
+ { 0,
+ offsetof(struct tc_flower_key, eth_type),
+ MEMBER_SIZEOF(struct tc_flower_key, eth_type)
+ },
+ },
+ [OVS_KEY_ATTR_TCP] = {
+ { offsetof(struct ovs_key_tcp, tcp_src),
+ offsetof(struct tc_flower_key, tcp_src),
+ MEMBER_SIZEOF(struct tc_flower_key, tcp_src)
+ },
+ { offsetof(struct ovs_key_tcp, tcp_dst),
+ offsetof(struct tc_flower_key, tcp_dst),
+ MEMBER_SIZEOF(struct tc_flower_key, tcp_dst)
+ },
+ },
+ [OVS_KEY_ATTR_UDP] = {
+ { offsetof(struct ovs_key_udp, udp_src),
+ offsetof(struct tc_flower_key, udp_src),
+ MEMBER_SIZEOF(struct tc_flower_key, udp_src)
+ },
+ { offsetof(struct ovs_key_udp, udp_dst),
+ offsetof(struct tc_flower_key, udp_dst),
+ MEMBER_SIZEOF(struct tc_flower_key, udp_dst)
+ },
+ },
+};
+
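+/* Protects 'ufid_tc'. */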
+static struct ovs_mutex ufid_lock = OVS_MUTEX_INITIALIZER;
+
+/**
+ * struct ufid_tc_data - data entry for ufid_tc hmap.
+ * @ufid_node: Element in @ufid_tc hash table by ufid key.
+ * @tc_node: Element in @ufid_tc hash table by prio/handle/ifindex key.
+ * @ufid: ufid assigned to the flow.
+ * @prio: tc priority.
+ * @handle: tc handle.
+ * @ifindex: netdev ifindex.
+ * @netdev: netdev associated with the tc rule.
+ */
+struct ufid_tc_data {
+ struct hmap_node ufid_node;
+ struct hmap_node tc_node;
+ ovs_u128 ufid;
+ uint16_t prio;
+ uint32_t handle;
+ int ifindex;
+ struct netdev *netdev;
+};
+
+/* Remove matching ufid entry from ufid_tc hashmap. */
+static void
+del_ufid_tc_mapping(const ovs_u128 *ufid)
+{
+ size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
+ struct ufid_tc_data *data;
+
+ ovs_mutex_lock(&ufid_lock);
+ HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
+ if (ovs_u128_equals(*ufid, data->ufid)) {
+ break;
+ }
+ }
+
+ if (!data) {
+ ovs_mutex_unlock(&ufid_lock);
+ return;
+ }
+
+ hmap_remove(&ufid_tc, &data->ufid_node);
+ hmap_remove(&ufid_tc, &data->tc_node);
+ netdev_close(data->netdev);
+ free(data);
+ ovs_mutex_unlock(&ufid_lock);
+}
+
+/* Wrapper function to delete a filter and its ufid tc mapping. */
+static int
+del_filter_and_ufid_mapping(int ifindex, int prio, int handle,
+ uint32_t block_id, const ovs_u128 *ufid,
+ enum tc_qdisc_hook hook)
+{
+ int err;
+
+ err = tc_del_filter(ifindex, prio, handle, block_id, hook);
+ del_ufid_tc_mapping(ufid);
+
+ return err;
+}
+
+/* Add ufid entry to ufid_tc hashmap. */
+static void
+add_ufid_tc_mapping(const ovs_u128 *ufid, int prio, int handle,
+ struct netdev *netdev, int ifindex)
+{
+ size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
+ size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);
+ struct ufid_tc_data *new_data = xzalloc(sizeof *new_data);
+
+ new_data->ufid = *ufid;
+ new_data->prio = prio;
+ new_data->handle = handle;
+ new_data->netdev = netdev_ref(netdev);
+ new_data->ifindex = ifindex;
+
+ ovs_mutex_lock(&ufid_lock);
+ hmap_insert(&ufid_tc, &new_data->ufid_node, ufid_hash);
+ hmap_insert(&ufid_tc, &new_data->tc_node, tc_hash);
+ ovs_mutex_unlock(&ufid_lock);
+}
+
+/* Get ufid from ufid_tc hashmap.
+ *
+ * If the 'netdev' output param is not NULL, then on success the function
+ * returns the associated netdev with a refcount taken on it.  The caller
+ * is then responsible for closing the netdev.
+ *
+ * Returns the handle if successful, filling prio and netdev for that
+ * ufid.  Otherwise returns 0.
+ */
+static int
+get_ufid_tc_mapping(const ovs_u128 *ufid, int *prio, struct netdev **netdev)
+{
+ size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
+ struct ufid_tc_data *data;
+ int handle = 0;
+
+ ovs_mutex_lock(&ufid_lock);
+ HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
+ if (ovs_u128_equals(*ufid, data->ufid)) {
+ if (prio) {
+ *prio = data->prio;
+ }
+ if (netdev) {
+ *netdev = netdev_ref(data->netdev);
+ }
+ handle = data->handle;
+ break;
+ }
+ }
+ ovs_mutex_unlock(&ufid_lock);
+
+ return handle;
+}
+
+/* Find ufid entry in ufid_tc hashmap using prio, handle and netdev.
+ * The result is saved in ufid.
+ *
+ * Returns true on success.
+ */
+static bool
+find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
+{
+ int ifindex = netdev_get_ifindex(netdev);
+ struct ufid_tc_data *data;
+ size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);
+
+ ovs_mutex_lock(&ufid_lock);
+ HMAP_FOR_EACH_WITH_HASH(data, tc_node, tc_hash, &ufid_tc) {
+ if (data->prio == prio && data->handle == handle
+ && data->ifindex == ifindex) {
+ *ufid = data->ufid;
+ break;
+ }
+ }
+ ovs_mutex_unlock(&ufid_lock);
+
+ return (data != NULL);
+}
+
+struct prio_map_data {
+ struct hmap_node node;
+ struct tc_flower_key mask;
+ ovs_be16 protocol;
+ uint16_t prio;
+};
+
+/* Get a free prio for tc flower.
+ * If a prio is already allocated for the mask/eth_type combination,
+ * return it.  If not, assign a new one.
+ *
+ * Returns the prio on success or 0 if we are out of prios.
+ */
+static uint16_t
+get_prio_for_tc_flower(struct tc_flower *flower)
+{
+ static struct hmap prios = HMAP_INITIALIZER(&prios);
+ static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
+ static uint16_t last_prio = TC_RESERVED_PRIORITY_MAX;
+ size_t key_len = sizeof(struct tc_flower_key);
+ size_t hash = hash_int((OVS_FORCE uint32_t) flower->key.eth_type, 0);
+ struct prio_map_data *data;
+ struct prio_map_data *new_data;
+
+ if (!multi_mask_per_prio) {
+ hash = hash_bytes(&flower->mask, key_len, hash);
+ }
+
+    /* We can use the same prio for the same mask/eth-type combination but
+     * need a different prio otherwise.  The flower classifier will reject
+     * the same prio for a different mask combination unless multiple masks
+     * per prio are supported. */
+ ovs_mutex_lock(&prios_lock);
+ HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
+ if ((multi_mask_per_prio
+ || !memcmp(&flower->mask, &data->mask, key_len))
+ && data->protocol == flower->key.eth_type) {
+ ovs_mutex_unlock(&prios_lock);
+ return data->prio;
+ }
+ }
+
+ if (last_prio == UINT16_MAX) {
+        /* last_prio can overflow if there are many different kinds of
+         * flows, which shouldn't happen organically. */
+ ovs_mutex_unlock(&prios_lock);
+ return 0;
+ }
+
+ new_data = xzalloc(sizeof *new_data);
+ memcpy(&new_data->mask, &flower->mask, key_len);
+ new_data->prio = ++last_prio;
+ new_data->protocol = flower->key.eth_type;
+ hmap_insert(&prios, &new_data->node, hash);
+ ovs_mutex_unlock(&prios_lock);
+
+ return new_data->prio;
+}
+
+static uint32_t
+get_block_id_from_netdev(struct netdev *netdev)
+{
+ if (block_support) {
+ return netdev_get_block_id(netdev);
+ }
+
+ return 0;
+}
+
+static int
+netdev_tc_flow_flush(struct netdev *netdev)
+{
+ enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
+ int ifindex = netdev_get_ifindex(netdev);
+ uint32_t block_id = 0;
+
+ if (ifindex < 0) {
+ VLOG_ERR_RL(&error_rl, "flow_flush: failed to get ifindex for %s: %s",
+ netdev_get_name(netdev), ovs_strerror(-ifindex));
+ return -ifindex;
+ }
+
+ block_id = get_block_id_from_netdev(netdev);
+
+ return tc_flush(ifindex, block_id, hook);
+}
+
+static int
+netdev_tc_flow_dump_create(struct netdev *netdev,
+ struct netdev_flow_dump **dump_out)
+{
+ enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
+ struct netdev_flow_dump *dump;
+ uint32_t block_id = 0;
+ int ifindex;
+
+ ifindex = netdev_get_ifindex(netdev);
+ if (ifindex < 0) {
+ VLOG_ERR_RL(&error_rl, "dump_create: failed to get ifindex for %s: %s",
+ netdev_get_name(netdev), ovs_strerror(-ifindex));
+ return -ifindex;
+ }
+
+ block_id = get_block_id_from_netdev(netdev);
+ dump = xzalloc(sizeof *dump);
+ dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
+ dump->netdev = netdev_ref(netdev);
+ tc_dump_flower_start(ifindex, dump->nl_dump, block_id, hook);
+
+ *dump_out = dump;
+
+ return 0;
+}
+
+static int
+netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
+{
+ nl_dump_done(dump->nl_dump);
+ netdev_close(dump->netdev);
+ free(dump->nl_dump);
+ free(dump);
+ return 0;
+}
+
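+/* Convert the flower pedit rewrite key/mask back into
+ * OVS_ACTION_ATTR_SET_MASKED netlink actions, using set_flower_map to
+ * translate flower key offsets back to ovs_key_* attributes. */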
+static void
+parse_flower_rewrite_to_netlink_action(struct ofpbuf *buf,
+ struct tc_flower *flower)
+{
+ char *mask = (char *) &flower->rewrite.mask;
+ char *data = (char *) &flower->rewrite.key;
+
+ for (int type = 0; type < ARRAY_SIZE(set_flower_map); type++) {
+ char *put = NULL;
+ size_t nested = 0;
+ int len = ovs_flow_key_attr_lens[type].len;
+
+ if (len <= 0) {
+ continue;
+ }
+
+ for (int j = 0; j < ARRAY_SIZE(set_flower_map[type]); j++) {
+ struct netlink_field *f = &set_flower_map[type][j];
+
+ if (!f->size) {
+ break;
+ }
+
+ if (!is_all_zeros(mask + f->flower_offset, f->size)) {
+ if (!put) {
+ nested = nl_msg_start_nested(buf,
+ OVS_ACTION_ATTR_SET_MASKED);
+ put = nl_msg_put_unspec_zero(buf, type, len * 2);
+ }
+
+ memcpy(put + f->offset, data + f->flower_offset, f->size);
+ memcpy(put + len + f->offset,
+ mask + f->flower_offset, f->size);
+ }
+ }
+
+ if (put) {
+ nl_msg_end_nested(buf, nested);
+ }
+ }
+}
+
+static void
+parse_tc_flower_geneve_opts(struct tc_action *action, struct ofpbuf *buf)
+{
+ int tun_opt_len = action->encap.data.present.len;
+ size_t geneve_off;
+ int idx = 0;
+
+ if (!tun_opt_len) {
+ return;
+ }
+
+ geneve_off = nl_msg_start_nested(buf, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);
+ while (tun_opt_len) {
+ struct geneve_opt *opt;
+
+ opt = &action->encap.data.opts.gnv[idx];
+ nl_msg_put(buf, opt, sizeof(struct geneve_opt) + opt->length * 4);
+ idx += sizeof(struct geneve_opt) / 4 + opt->length;
+ tun_opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
+ }
+ nl_msg_end_nested(buf, geneve_off);
+}
+
+static void
+flower_tun_opt_to_match(struct match *match, struct tc_flower *flower)
+{
+ struct geneve_opt *opt, *opt_mask;
+ int len, cnt = 0;
+
+ memcpy(match->flow.tunnel.metadata.opts.gnv,
+ flower->key.tunnel.metadata.opts.gnv,
+ flower->key.tunnel.metadata.present.len);
+ match->flow.tunnel.metadata.present.len =
+ flower->key.tunnel.metadata.present.len;
+ match->flow.tunnel.flags |= FLOW_TNL_F_UDPIF;
+ memcpy(match->wc.masks.tunnel.metadata.opts.gnv,
+ flower->mask.tunnel.metadata.opts.gnv,
+ flower->mask.tunnel.metadata.present.len);
+
+ len = flower->key.tunnel.metadata.present.len;
+ while (len) {
+ opt = &match->flow.tunnel.metadata.opts.gnv[cnt];
+ opt_mask = &match->wc.masks.tunnel.metadata.opts.gnv[cnt];
+
+ opt_mask->length = 0x1f;
+
+ cnt += sizeof(struct geneve_opt) / 4 + opt->length;
+ len -= sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ match->wc.masks.tunnel.metadata.present.len =
+ flower->mask.tunnel.metadata.present.len;
+ match->wc.masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
+}
+
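+/* Translate a tc flower rule dumped from the kernel back into an OVS
+ * match, netlink actions, statistics and offload attributes.  The
+ * generated actions are stored in 'buf'. */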
+static int
+parse_tc_flower_to_match(struct tc_flower *flower,
+ struct match *match,
+ struct nlattr **actions,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs,
+ struct ofpbuf *buf)
+{
+ size_t act_off;
+ struct tc_flower_key *key = &flower->key;
+ struct tc_flower_key *mask = &flower->mask;
+ odp_port_t outport = 0;
+ struct tc_action *action;
+ int i;
+
+ ofpbuf_clear(buf);
+
+ match_init_catchall(match);
+ match_set_dl_src_masked(match, key->src_mac, mask->src_mac);
+ match_set_dl_dst_masked(match, key->dst_mac, mask->dst_mac);
+
+ if (eth_type_vlan(key->eth_type)) {
+ match->flow.vlans[0].tpid = key->eth_type;
+ match->wc.masks.vlans[0].tpid = OVS_BE16_MAX;
+ match_set_dl_vlan(match, htons(key->vlan_id[0]), 0);
+ match_set_dl_vlan_pcp(match, key->vlan_prio[0], 0);
+
+ if (eth_type_vlan(key->encap_eth_type[0])) {
+ match_set_dl_vlan(match, htons(key->vlan_id[1]), 1);
+ match_set_dl_vlan_pcp(match, key->vlan_prio[1], 1);
+ match_set_dl_type(match, key->encap_eth_type[1]);
+ match->flow.vlans[1].tpid = key->encap_eth_type[0];
+ match->wc.masks.vlans[1].tpid = OVS_BE16_MAX;
+ } else {
+ match_set_dl_type(match, key->encap_eth_type[0]);
+ }
+ flow_fix_vlan_tpid(&match->flow);
+ } else if (eth_type_mpls(key->eth_type)) {
+ match->flow.mpls_lse[0] = key->mpls_lse & mask->mpls_lse;
+ match->wc.masks.mpls_lse[0] = mask->mpls_lse;
+ match_set_dl_type(match, key->encap_eth_type[0]);
+ } else {
+ match_set_dl_type(match, key->eth_type);
+ }
+
+ if (is_ip_any(&match->flow)) {
+ if (key->ip_proto) {
+ match_set_nw_proto(match, key->ip_proto);
+ }
+
+ match_set_nw_tos_masked(match, key->ip_tos, mask->ip_tos);
+ match_set_nw_ttl_masked(match, key->ip_ttl, mask->ip_ttl);
+
+ if (mask->flags) {
+ uint8_t flags = 0;
+ uint8_t flags_mask = 0;
+
+ if (mask->flags & TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT) {
+ if (key->flags & TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT) {
+ flags |= FLOW_NW_FRAG_ANY;
+ }
+ flags_mask |= FLOW_NW_FRAG_ANY;
+ }
+
+ if (mask->flags & TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST) {
+ if (!(key->flags & TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)) {
+ flags |= FLOW_NW_FRAG_LATER;
+ }
+ flags_mask |= FLOW_NW_FRAG_LATER;
+ }
+
+ match_set_nw_frag_masked(match, flags, flags_mask);
+ }
+
+        match_set_nw_src_masked(match, key->ipv4.ipv4_src,
+                                mask->ipv4.ipv4_src);
+        match_set_nw_dst_masked(match, key->ipv4.ipv4_dst,
+                                mask->ipv4.ipv4_dst);
+
+ match_set_ipv6_src_masked(match,
+ &key->ipv6.ipv6_src, &mask->ipv6.ipv6_src);
+ match_set_ipv6_dst_masked(match,
+ &key->ipv6.ipv6_dst, &mask->ipv6.ipv6_dst);
+
+ if (key->ip_proto == IPPROTO_TCP) {
+ match_set_tp_dst_masked(match, key->tcp_dst, mask->tcp_dst);
+ match_set_tp_src_masked(match, key->tcp_src, mask->tcp_src);
+ match_set_tcp_flags_masked(match, key->tcp_flags, mask->tcp_flags);
+ } else if (key->ip_proto == IPPROTO_UDP) {
+ match_set_tp_dst_masked(match, key->udp_dst, mask->udp_dst);
+ match_set_tp_src_masked(match, key->udp_src, mask->udp_src);
+ } else if (key->ip_proto == IPPROTO_SCTP) {
+ match_set_tp_dst_masked(match, key->sctp_dst, mask->sctp_dst);
+ match_set_tp_src_masked(match, key->sctp_src, mask->sctp_src);
+ }
+ }
+
+ if (flower->tunnel) {
+ if (flower->mask.tunnel.id) {
+ match_set_tun_id(match, flower->key.tunnel.id);
+ }
+ if (flower->key.tunnel.ipv4.ipv4_dst) {
+ match_set_tun_src(match, flower->key.tunnel.ipv4.ipv4_src);
+ match_set_tun_dst(match, flower->key.tunnel.ipv4.ipv4_dst);
+ } else if (!is_all_zeros(&flower->key.tunnel.ipv6.ipv6_dst,
+ sizeof flower->key.tunnel.ipv6.ipv6_dst)) {
+ match_set_tun_ipv6_src(match, &flower->key.tunnel.ipv6.ipv6_src);
+ match_set_tun_ipv6_dst(match, &flower->key.tunnel.ipv6.ipv6_dst);
+ }
+ if (flower->key.tunnel.tos) {
+ match_set_tun_tos_masked(match, flower->key.tunnel.tos,
+ flower->mask.tunnel.tos);
+ }
+ if (flower->key.tunnel.ttl) {
+ match_set_tun_ttl_masked(match, flower->key.tunnel.ttl,
+ flower->mask.tunnel.ttl);
+ }
+ if (flower->key.tunnel.tp_dst) {
+ match_set_tun_tp_dst(match, flower->key.tunnel.tp_dst);
+ }
+ if (flower->key.tunnel.metadata.present.len) {
+ flower_tun_opt_to_match(match, flower);
+ }
+ }
+
+ act_off = nl_msg_start_nested(buf, OVS_FLOW_ATTR_ACTIONS);
+ {
+ action = flower->actions;
+ for (i = 0; i < flower->action_count; i++, action++) {
+ switch (action->type) {
+ case TC_ACT_VLAN_POP: {
+ nl_msg_put_flag(buf, OVS_ACTION_ATTR_POP_VLAN);
+ }
+ break;
+ case TC_ACT_VLAN_PUSH: {
+ struct ovs_action_push_vlan *push;
+
+ push = nl_msg_put_unspec_zero(buf, OVS_ACTION_ATTR_PUSH_VLAN,
+ sizeof *push);
+ push->vlan_tpid = action->vlan.vlan_push_tpid;
+ push->vlan_tci = htons(action->vlan.vlan_push_id
+ | (action->vlan.vlan_push_prio << 13)
+ | VLAN_CFI);
+ }
+ break;
+ case TC_ACT_PEDIT: {
+ parse_flower_rewrite_to_netlink_action(buf, flower);
+ }
+ break;
+ case TC_ACT_ENCAP: {
+ size_t set_offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SET);
+ size_t tunnel_offset =
+ nl_msg_start_nested(buf, OVS_KEY_ATTR_TUNNEL);
+
+ if (action->encap.id_present) {
+ nl_msg_put_be64(buf, OVS_TUNNEL_KEY_ATTR_ID, action->encap.id);
+ }
+ if (action->encap.ipv4.ipv4_src) {
+ nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
+ action->encap.ipv4.ipv4_src);
+ }
+ if (action->encap.ipv4.ipv4_dst) {
+ nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
+ action->encap.ipv4.ipv4_dst);
+ }
+ if (!is_all_zeros(&action->encap.ipv6.ipv6_src,
+ sizeof action->encap.ipv6.ipv6_src)) {
+ nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
+ &action->encap.ipv6.ipv6_src);
+ }
+ if (!is_all_zeros(&action->encap.ipv6.ipv6_dst,
+ sizeof action->encap.ipv6.ipv6_dst)) {
+ nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
+ &action->encap.ipv6.ipv6_dst);
+ }
+ if (action->encap.tos) {
+ nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_TOS,
+ action->encap.tos);
+ }
+ if (action->encap.ttl) {
+ nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_TTL,
+ action->encap.ttl);
+ }
+ if (action->encap.tp_dst) {
+ nl_msg_put_be16(buf, OVS_TUNNEL_KEY_ATTR_TP_DST,
+ action->encap.tp_dst);
+ }
+ if (!action->encap.no_csum) {
+ nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_CSUM,
+ !action->encap.no_csum);
+ }
+
+ parse_tc_flower_geneve_opts(action, buf);
+ nl_msg_end_nested(buf, tunnel_offset);
+ nl_msg_end_nested(buf, set_offset);
+ }
+ break;
+ case TC_ACT_OUTPUT: {
+ if (action->out.ifindex_out) {
+ outport =
+ netdev_ifindex_to_odp_port(action->out.ifindex_out);
+ if (!outport) {
+ return ENOENT;
+ }
+ }
+ nl_msg_put_u32(buf, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(outport));
+ }
+ break;
+ }
+ }
+ }
+ nl_msg_end_nested(buf, act_off);
+
+ *actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));
+
+ if (stats) {
+ memset(stats, 0, sizeof *stats);
+ stats->n_packets = get_32aligned_u64(&flower->stats.n_packets);
+ stats->n_bytes = get_32aligned_u64(&flower->stats.n_bytes);
+ stats->used = flower->lastused;
+ }
+
+ attrs->offloaded = (flower->offloaded_state == TC_OFFLOADED_STATE_IN_HW)
+ || (flower->offloaded_state == TC_OFFLOADED_STATE_UNDEFINED);
+ attrs->dp_layer = "tc";
+
+ return 0;
+}
+
+static bool
+netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
+ struct match *match,
+ struct nlattr **actions,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs,
+ ovs_u128 *ufid,
+ struct ofpbuf *rbuffer,
+ struct ofpbuf *wbuffer)
+{
+ struct ofpbuf nl_flow;
+
+ while (nl_dump_next(dump->nl_dump, &nl_flow, rbuffer)) {
+ struct tc_flower flower;
+ struct netdev *netdev = dump->netdev;
+
+ if (parse_netlink_to_tc_flower(&nl_flow, &flower)) {
+ continue;
+ }
+
+ if (parse_tc_flower_to_match(&flower, match, actions, stats, attrs,
+ wbuffer)) {
+ continue;
+ }
+
+ if (flower.act_cookie.len) {
+ *ufid = *((ovs_u128 *) flower.act_cookie.data);
+ } else if (!find_ufid(flower.prio, flower.handle, netdev, ufid)) {
+ continue;
+ }
+
+ match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
+ match->flow.in_port.odp_port = dump->port;
+
+ return true;
+ }
+
+ return false;
+}
+
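+/* Translate an OVS set()/set(masked) action into a flower pedit rewrite
+ * via set_flower_map, accumulating the result in flower->rewrite.
+ * Returns EOPNOTSUPP for attributes (or mask bits) that the map cannot
+ * express. */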
+static int
+parse_put_flow_set_masked_action(struct tc_flower *flower,
+ struct tc_action *action,
+ const struct nlattr *set,
+ size_t set_len,
+ bool hasmask)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+ uint64_t set_stub[1024 / 8];
+ struct ofpbuf set_buf = OFPBUF_STUB_INITIALIZER(set_stub);
+ char *set_data, *set_mask;
+ char *key = (char *) &flower->rewrite.key;
+ char *mask = (char *) &flower->rewrite.mask;
+ const struct nlattr *attr;
+ int i, j, type;
+ size_t size;
+
+    /* Copy so we can set attr mask to 0 for used ovs key struct members. */
+ attr = ofpbuf_put(&set_buf, set, set_len);
+
+ type = nl_attr_type(attr);
+ size = nl_attr_get_size(attr) / 2;
+ set_data = CONST_CAST(char *, nl_attr_get(attr));
+ set_mask = set_data + size;
+
+ if (type >= ARRAY_SIZE(set_flower_map)
+ || !set_flower_map[type][0].size) {
+ VLOG_DBG_RL(&rl, "unsupported set action type: %d", type);
+ ofpbuf_uninit(&set_buf);
+ return EOPNOTSUPP;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(set_flower_map[type]); i++) {
+ struct netlink_field *f = &set_flower_map[type][i];
+
+ if (!f->size) {
+ break;
+ }
+
+        /* Copy the masked value. */
+ for (j = 0; j < f->size; j++) {
+ char maskval = hasmask ? set_mask[f->offset + j] : 0xFF;
+
+ key[f->flower_offset + j] = maskval & set_data[f->offset + j];
+ mask[f->flower_offset + j] = maskval;
+ }
+
+        /* Set its mask to 0 to show it's been used. */
+ if (hasmask) {
+ memset(set_mask + f->offset, 0, f->size);
+ }
+ }
+
+ if (!is_all_zeros(&flower->rewrite, sizeof flower->rewrite)) {
+ if (flower->rewrite.rewrite == false) {
+ flower->rewrite.rewrite = true;
+ action->type = TC_ACT_PEDIT;
+ flower->action_count++;
+ }
+ }
+
+ if (hasmask && !is_all_zeros(set_mask, size)) {
+ VLOG_DBG_RL(&rl, "unsupported sub attribute of set action type %d",
+ type);
+ ofpbuf_uninit(&set_buf);
+ return EOPNOTSUPP;
+ }
+
+ ofpbuf_uninit(&set_buf);
+ return 0;
+}
+
+static int
+parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action,
+ const struct nlattr *set, size_t set_len)
+{
+ const struct nlattr *tunnel;
+ const struct nlattr *tun_attr;
+ size_t tun_left, tunnel_len;
+
+ if (nl_attr_type(set) != OVS_KEY_ATTR_TUNNEL) {
+ return parse_put_flow_set_masked_action(flower, action, set,
+ set_len, false);
+ }
+
+ tunnel = nl_attr_get(set);
+ tunnel_len = nl_attr_get_size(set);
+
+ action->type = TC_ACT_ENCAP;
+ action->encap.id_present = false;
+ flower->action_count++;
+ NL_ATTR_FOR_EACH_UNSAFE(tun_attr, tun_left, tunnel, tunnel_len) {
+ switch (nl_attr_type(tun_attr)) {
+ case OVS_TUNNEL_KEY_ATTR_ID: {
+ action->encap.id = nl_attr_get_be64(tun_attr);
+ action->encap.id_present = true;
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: {
+ action->encap.ipv4.ipv4_src = nl_attr_get_be32(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV4_DST: {
+ action->encap.ipv4.ipv4_dst = nl_attr_get_be32(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_TOS: {
+ action->encap.tos = nl_attr_get_u8(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_TTL: {
+ action->encap.ttl = nl_attr_get_u8(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
+ action->encap.ipv6.ipv6_src =
+ nl_attr_get_in6_addr(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
+ action->encap.ipv6.ipv6_dst =
+ nl_attr_get_in6_addr(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_TP_SRC: {
+ action->encap.tp_src = nl_attr_get_be16(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_TP_DST: {
+ action->encap.tp_dst = nl_attr_get_be16(tun_attr);
+ }
+ break;
+ case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: {
+ memcpy(action->encap.data.opts.gnv, nl_attr_get(tun_attr),
+ nl_attr_get_size(tun_attr));
+ action->encap.data.present.len = nl_attr_get_size(tun_attr);
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+test_key_and_mask(struct match *match)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+ const struct flow *key = &match->flow;
+ struct flow *mask = &match->wc.masks;
+
+ if (mask->pkt_mark) {
+ VLOG_DBG_RL(&rl, "offloading attribute pkt_mark isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->recirc_id && key->recirc_id) {
+ VLOG_DBG_RL(&rl, "offloading attribute recirc_id isn't supported");
+ return EOPNOTSUPP;
+ }
+ mask->recirc_id = 0;
+
+ if (mask->dp_hash) {
+ VLOG_DBG_RL(&rl, "offloading attribute dp_hash isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->conj_id) {
+ VLOG_DBG_RL(&rl, "offloading attribute conj_id isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->skb_priority) {
+ VLOG_DBG_RL(&rl, "offloading attribute skb_priority isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->actset_output) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute actset_output isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->ct_state) {
+ VLOG_DBG_RL(&rl, "offloading attribute ct_state isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->ct_zone) {
+ VLOG_DBG_RL(&rl, "offloading attribute ct_zone isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->ct_mark) {
+ VLOG_DBG_RL(&rl, "offloading attribute ct_mark isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->packet_type && key->packet_type) {
+ VLOG_DBG_RL(&rl, "offloading attribute packet_type isn't supported");
+ return EOPNOTSUPP;
+ }
+ mask->packet_type = 0;
+
+ if (!ovs_u128_is_zero(mask->ct_label)) {
+ VLOG_DBG_RL(&rl, "offloading attribute ct_label isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ for (int i = 0; i < FLOW_N_REGS; i++) {
+ if (mask->regs[i]) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute regs[%d] isn't supported", i);
+ return EOPNOTSUPP;
+ }
+ }
+
+ if (mask->metadata) {
+ VLOG_DBG_RL(&rl, "offloading attribute metadata isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (mask->nw_tos) {
+ VLOG_DBG_RL(&rl, "offloading attribute nw_tos isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ for (int i = 1; i < FLOW_MAX_MPLS_LABELS; i++) {
+ if (mask->mpls_lse[i]) {
+ VLOG_DBG_RL(&rl, "offloading multiple mpls_lses isn't supported");
+ return EOPNOTSUPP;
+ }
+ }
+
+ if (key->dl_type == htons(ETH_TYPE_IP) &&
+ key->nw_proto == IPPROTO_ICMP) {
+ if (mask->tp_src) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute icmp_type isn't supported");
+ return EOPNOTSUPP;
+ }
+ if (mask->tp_dst) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute icmp_code isn't supported");
+ return EOPNOTSUPP;
+ }
+ } else if (key->dl_type == htons(ETH_TYPE_IP) &&
+ key->nw_proto == IPPROTO_IGMP) {
+ if (mask->tp_src) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute igmp_type isn't supported");
+ return EOPNOTSUPP;
+ }
+ if (mask->tp_dst) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute igmp_code isn't supported");
+ return EOPNOTSUPP;
+ }
+ } else if (key->dl_type == htons(ETH_TYPE_IPV6) &&
+ key->nw_proto == IPPROTO_ICMPV6) {
+ if (mask->tp_src) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute icmpv6_type isn't supported");
+ return EOPNOTSUPP;
+ }
+ if (mask->tp_dst) {
+ VLOG_DBG_RL(&rl,
+ "offloading attribute icmpv6_code isn't supported");
+ return EOPNOTSUPP;
+ }
+ } else if (key->dl_type == htons(OFP_DL_TYPE_NOT_ETH_TYPE)) {
+ VLOG_DBG_RL(&rl,
+ "offloading of non-ethernet packets isn't supported");
+ return EOPNOTSUPP;
+ }
+
+ if (!is_all_zeros(mask, sizeof *mask)) {
+ VLOG_DBG_RL(&rl, "offloading isn't supported, unknown attribute");
+ return EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void
+flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl,
+ const struct flow_tnl *tnl_mask)
+{
+ struct geneve_opt *opt, *opt_mask;
+ int len, cnt = 0;
+
+ memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv,
+ tnl->metadata.present.len);
+ flower->key.tunnel.metadata.present.len = tnl->metadata.present.len;
+
+ memcpy(flower->mask.tunnel.metadata.opts.gnv, tnl_mask->metadata.opts.gnv,
+ tnl->metadata.present.len);
+
+ len = flower->key.tunnel.metadata.present.len;
+ while (len) {
+ opt = &flower->key.tunnel.metadata.opts.gnv[cnt];
+ opt_mask = &flower->mask.tunnel.metadata.opts.gnv[cnt];
+
+ opt_mask->length = opt->length;
+
+ cnt += sizeof(struct geneve_opt) / 4 + opt->length;
+ len -= sizeof(struct geneve_opt) + opt->length * 4;
+ }
+
+ flower->mask.tunnel.metadata.present.len = tnl->metadata.present.len;
+}
+
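+/* Install a datapath flow on 'netdev' by translating the OVS match and
+ * actions into an equivalent tc flower rule. */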
+static int
+netdev_tc_flow_put(struct netdev *netdev, struct match *match,
+ struct nlattr *actions, size_t actions_len,
+ const ovs_u128 *ufid, struct offload_info *info,
+ struct dpif_flow_stats *stats OVS_UNUSED)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+ enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
+ struct tc_flower flower;
+ const struct flow *key = &match->flow;
+ struct flow *mask = &match->wc.masks;
+ const struct flow_tnl *tnl = &match->flow.tunnel;
+ const struct flow_tnl *tnl_mask = &mask->tunnel;
+ struct tc_action *action;
+ uint32_t block_id = 0;
+ struct nlattr *nla;
+ size_t left;
+ int prio = 0;
+ int handle;
+ int ifindex;
+ int err;
+
+ ifindex = netdev_get_ifindex(netdev);
+ if (ifindex < 0) {
+ VLOG_ERR_RL(&error_rl, "flow_put: failed to get ifindex for %s: %s",
+ netdev_get_name(netdev), ovs_strerror(-ifindex));
+ return -ifindex;
+ }
+
+ memset(&flower, 0, sizeof flower);
+
+ if (flow_tnl_dst_is_set(&key->tunnel)) {
+ VLOG_DBG_RL(&rl,
+ "tunnel: id %#" PRIx64 " src " IP_FMT
+ " dst " IP_FMT " tp_src %d tp_dst %d",
+ ntohll(tnl->tun_id),
+ IP_ARGS(tnl->ip_src), IP_ARGS(tnl->ip_dst),
+ ntohs(tnl->tp_src), ntohs(tnl->tp_dst));
+ flower.key.tunnel.id = tnl->tun_id;
+ flower.key.tunnel.ipv4.ipv4_src = tnl->ip_src;
+ flower.key.tunnel.ipv4.ipv4_dst = tnl->ip_dst;
+ flower.key.tunnel.ipv6.ipv6_src = tnl->ipv6_src;
+ flower.key.tunnel.ipv6.ipv6_dst = tnl->ipv6_dst;
+ flower.key.tunnel.tos = tnl->ip_tos;
+ flower.key.tunnel.ttl = tnl->ip_ttl;
+ flower.key.tunnel.tp_src = tnl->tp_src;
+ flower.key.tunnel.tp_dst = tnl->tp_dst;
+ flower.mask.tunnel.tos = tnl_mask->ip_tos;
+ flower.mask.tunnel.ttl = tnl_mask->ip_ttl;
+        flower.mask.tunnel.id = (tnl->flags & FLOW_TNL_F_KEY)
+                                ? tnl_mask->tun_id : 0;
+ flower_match_to_tun_opt(&flower, tnl, tnl_mask);
+ flower.tunnel = true;
+ }
+ memset(&mask->tunnel, 0, sizeof mask->tunnel);
+
+ flower.key.eth_type = key->dl_type;
+ flower.mask.eth_type = mask->dl_type;
+ if (mask->mpls_lse[0]) {
+ flower.key.mpls_lse = key->mpls_lse[0];
+ flower.mask.mpls_lse = mask->mpls_lse[0];
+ flower.key.encap_eth_type[0] = flower.key.eth_type;
+ }
+ mask->mpls_lse[0] = 0;
+
+ if (eth_type_vlan(key->vlans[0].tpid)) {
+ flower.key.encap_eth_type[0] = flower.key.eth_type;
+ flower.key.eth_type = key->vlans[0].tpid;
+ }
+ if (mask->vlans[0].tci) {
+ ovs_be16 vid_mask = mask->vlans[0].tci & htons(VLAN_VID_MASK);
+ ovs_be16 pcp_mask = mask->vlans[0].tci & htons(VLAN_PCP_MASK);
+ ovs_be16 cfi = mask->vlans[0].tci & htons(VLAN_CFI);
+
+ if (cfi && key->vlans[0].tci & htons(VLAN_CFI)
+ && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
+ && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
+ && (vid_mask || pcp_mask)) {
+ if (vid_mask) {
+ flower.key.vlan_id[0] = vlan_tci_to_vid(key->vlans[0].tci);
+ flower.mask.vlan_id[0] = vlan_tci_to_vid(mask->vlans[0].tci);
+ VLOG_DBG_RL(&rl, "vlan_id[0]: %d\n", flower.key.vlan_id[0]);
+ }
+ if (pcp_mask) {
+ flower.key.vlan_prio[0] = vlan_tci_to_pcp(key->vlans[0].tci);
+ flower.mask.vlan_prio[0] = vlan_tci_to_pcp(mask->vlans[0].tci);
+ VLOG_DBG_RL(&rl, "vlan_prio[0]: %d\n",
+ flower.key.vlan_prio[0]);
+ }
+ } else if (mask->vlans[0].tci == htons(0xffff) &&
+ ntohs(key->vlans[0].tci) == 0) {
+ /* exact && no vlan */
+ } else {
+ /* partial mask */
+ return EOPNOTSUPP;
+ }
+ }
+
+ if (eth_type_vlan(key->vlans[1].tpid)) {
+ flower.key.encap_eth_type[1] = flower.key.encap_eth_type[0];
+ flower.key.encap_eth_type[0] = key->vlans[1].tpid;
+ }
+ if (mask->vlans[1].tci) {
+ ovs_be16 vid_mask = mask->vlans[1].tci & htons(VLAN_VID_MASK);
+ ovs_be16 pcp_mask = mask->vlans[1].tci & htons(VLAN_PCP_MASK);
+ ovs_be16 cfi = mask->vlans[1].tci & htons(VLAN_CFI);
+
+ if (cfi && key->vlans[1].tci & htons(VLAN_CFI)
+ && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
+ && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
+ && (vid_mask || pcp_mask)) {
+ if (vid_mask) {
+ flower.key.vlan_id[1] = vlan_tci_to_vid(key->vlans[1].tci);
+ flower.mask.vlan_id[1] = vlan_tci_to_vid(mask->vlans[1].tci);
+ VLOG_DBG_RL(&rl, "vlan_id[1]: %d", flower.key.vlan_id[1]);
+ }
+ if (pcp_mask) {
+ flower.key.vlan_prio[1] = vlan_tci_to_pcp(key->vlans[1].tci);
+ flower.mask.vlan_prio[1] = vlan_tci_to_pcp(mask->vlans[1].tci);
+ VLOG_DBG_RL(&rl, "vlan_prio[1]: %d", flower.key.vlan_prio[1]);
+ }
+ } else if (mask->vlans[1].tci == htons(0xffff) &&
+ ntohs(key->vlans[1].tci) == 0) {
+ /* exact && no vlan */
+ } else {
+ /* partial mask */
+ return EOPNOTSUPP;
+ }
+ }
+ memset(mask->vlans, 0, sizeof mask->vlans);
+
+ flower.key.dst_mac = key->dl_dst;
+ flower.mask.dst_mac = mask->dl_dst;
+ flower.key.src_mac = key->dl_src;
+ flower.mask.src_mac = mask->dl_src;
+ memset(&mask->dl_dst, 0, sizeof mask->dl_dst);
+ memset(&mask->dl_src, 0, sizeof mask->dl_src);
+ mask->dl_type = 0;
+ mask->in_port.odp_port = 0;
+
+ if (is_ip_any(key)) {
+ flower.key.ip_proto = key->nw_proto;
+ flower.mask.ip_proto = mask->nw_proto;
+ mask->nw_proto = 0;
+ flower.key.ip_tos = key->nw_tos;
+ flower.mask.ip_tos = mask->nw_tos;
+ mask->nw_tos = 0;
+ flower.key.ip_ttl = key->nw_ttl;
+ flower.mask.ip_ttl = mask->nw_ttl;
+ mask->nw_ttl = 0;
+
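+        /* Map the OVS fragmentation bits onto flower's flags: nw_frag ANY
+         * becomes IS_FRAGMENT, while "not LATER" (a first fragment) maps to
+         * FRAG_IS_FIRST, so the LATER sense is inverted in flower terms. */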
+ if (mask->nw_frag & FLOW_NW_FRAG_ANY) {
+ flower.mask.flags |= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT;
+
+ if (key->nw_frag & FLOW_NW_FRAG_ANY) {
+ flower.key.flags |= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT;
+
+ if (mask->nw_frag & FLOW_NW_FRAG_LATER) {
+ flower.mask.flags |= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST;
+
+ if (!(key->nw_frag & FLOW_NW_FRAG_LATER)) {
+ flower.key.flags |= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST;
+ }
+ }
+ }
+
+ mask->nw_frag = 0;
+ }
+
+ if (key->nw_proto == IPPROTO_TCP) {
+ flower.key.tcp_dst = key->tp_dst;
+ flower.mask.tcp_dst = mask->tp_dst;
+ flower.key.tcp_src = key->tp_src;
+ flower.mask.tcp_src = mask->tp_src;
+ flower.key.tcp_flags = key->tcp_flags;
+ flower.mask.tcp_flags = mask->tcp_flags;
+ mask->tp_src = 0;
+ mask->tp_dst = 0;
+ mask->tcp_flags = 0;
+ } else if (key->nw_proto == IPPROTO_UDP) {
+ flower.key.udp_dst = key->tp_dst;
+ flower.mask.udp_dst = mask->tp_dst;
+ flower.key.udp_src = key->tp_src;
+ flower.mask.udp_src = mask->tp_src;
+ mask->tp_src = 0;
+ mask->tp_dst = 0;
+ } else if (key->nw_proto == IPPROTO_SCTP) {
+ flower.key.sctp_dst = key->tp_dst;
+ flower.mask.sctp_dst = mask->tp_dst;
+ flower.key.sctp_src = key->tp_src;
+ flower.mask.sctp_src = mask->tp_src;
+ mask->tp_src = 0;
+ mask->tp_dst = 0;
+ }
+
+ if (key->dl_type == htons(ETH_P_IP)) {
+ flower.key.ipv4.ipv4_src = key->nw_src;
+ flower.mask.ipv4.ipv4_src = mask->nw_src;
+ flower.key.ipv4.ipv4_dst = key->nw_dst;
+ flower.mask.ipv4.ipv4_dst = mask->nw_dst;
+ mask->nw_src = 0;
+ mask->nw_dst = 0;
+ } else if (key->dl_type == htons(ETH_P_IPV6)) {
+ flower.key.ipv6.ipv6_src = key->ipv6_src;
+ flower.mask.ipv6.ipv6_src = mask->ipv6_src;
+ flower.key.ipv6.ipv6_dst = key->ipv6_dst;
+ flower.mask.ipv6.ipv6_dst = mask->ipv6_dst;
+ memset(&mask->ipv6_src, 0, sizeof mask->ipv6_src);
+ memset(&mask->ipv6_dst, 0, sizeof mask->ipv6_dst);
+ }
+ }
+
+ err = test_key_and_mask(match);
+ if (err) {
+ return err;
+ }
+
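+    /* Translate the datapath actions into tc actions, at most
+     * TCA_ACT_MAX_PRIO of them.  Any action without a tc equivalent makes
+     * the whole flow non-offloadable, so the caller can fall back to the
+     * software datapath. */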
+ NL_ATTR_FOR_EACH(nla, left, actions, actions_len) {
+ if (flower.action_count >= TCA_ACT_MAX_PRIO) {
+ VLOG_DBG_RL(&rl, "Can only support %d actions", flower.action_count);
+ return EOPNOTSUPP;
+ }
+ action = &flower.actions[flower.action_count];
+ if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
+ odp_port_t port = nl_attr_get_odp_port(nla);
+            struct netdev *outdev = netdev_ports_get(port, info->dpif_class);
+
+            if (!outdev) {
+                VLOG_DBG_RL(&rl, "Cannot find netdev for odp port %"PRIu32,
+                            odp_to_u32(port));
+                return ENODEV;
+            }
+            action->out.ifindex_out = netdev_get_ifindex(outdev);
+ action->out.ingress = is_internal_port(netdev_get_type(outdev));
+ action->type = TC_ACT_OUTPUT;
+ flower.action_count++;
+ netdev_close(outdev);
+ } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_VLAN) {
+ const struct ovs_action_push_vlan *vlan_push = nl_attr_get(nla);
+
+ action->vlan.vlan_push_tpid = vlan_push->vlan_tpid;
+ action->vlan.vlan_push_id = vlan_tci_to_vid(vlan_push->vlan_tci);
+ action->vlan.vlan_push_prio = vlan_tci_to_pcp(vlan_push->vlan_tci);
+ action->type = TC_ACT_VLAN_PUSH;
+ flower.action_count++;
+ } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) {
+ action->type = TC_ACT_VLAN_POP;
+ flower.action_count++;
+ } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET) {
+ const struct nlattr *set = nl_attr_get(nla);
+ const size_t set_len = nl_attr_get_size(nla);
+
+ err = parse_put_flow_set_action(&flower, action, set, set_len);
+ if (err) {
+ return err;
+ }
+ if (action->type == TC_ACT_ENCAP) {
+ action->encap.tp_dst = info->tp_dst_port;
+ action->encap.no_csum = !info->tunnel_csum_on;
+ }
+ } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) {
+ const struct nlattr *set = nl_attr_get(nla);
+ const size_t set_len = nl_attr_get_size(nla);
+
+ err = parse_put_flow_set_masked_action(&flower, action, set,
+ set_len, true);
+ if (err) {
+ return err;
+ }
+ } else {
+ VLOG_DBG_RL(&rl, "unsupported put action type: %d",
+ nl_attr_type(nla));
+ return EOPNOTSUPP;
+ }
+ }
+
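+    /* A put for a ufid that is already offloaded is a modification: the old
+     * filter is deleted first, and the replacement is installed below with
+     * the same prio and handle. */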
+ block_id = get_block_id_from_netdev(netdev);
+ handle = get_ufid_tc_mapping(ufid, &prio, NULL);
+ if (handle && prio) {
+ VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d", handle, prio);
+ del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid,
+ hook);
+ }
+
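+    /* First offload of this flow: allocate a prio.  Prios are keyed on the
+     * flower mask and eth_type (see get_prio_for_tc_flower()), since the
+     * flower classifier constrains which masks may share a prio. */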
+ if (!prio) {
+ prio = get_prio_for_tc_flower(&flower);
+ if (prio == 0) {
+ VLOG_ERR_RL(&rl, "couldn't get tc prio: %s", ovs_strerror(ENOSPC));
+ return ENOSPC;
+ }
+ }
+
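+    /* Stash the ufid in the tc action cookie.  Flows dumped back from tc
+     * can then be mapped to their ufid directly, without a lookup in the
+     * ufid_tc table (see netdev_tc_flow_dump_next()). */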
+ flower.act_cookie.data = ufid;
+ flower.act_cookie.len = sizeof *ufid;
+
+ err = tc_replace_flower(ifindex, prio, handle, &flower, block_id, hook);
+ if (!err) {
+ add_ufid_tc_mapping(ufid, flower.prio, flower.handle, netdev, ifindex);
+ }
+
+ return err;
+}
+
+static int
+netdev_tc_flow_get(struct netdev *netdev OVS_UNUSED,
+ struct match *match,
+ struct nlattr **actions,
+ const ovs_u128 *ufid,
+ struct dpif_flow_stats *stats,
+ struct dpif_flow_attrs *attrs,
+ struct ofpbuf *buf)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
+ struct netdev *dev;
+ struct tc_flower flower;
+ enum tc_qdisc_hook hook;
+ uint32_t block_id = 0;
+ odp_port_t in_port;
+ int prio = 0;
+ int ifindex;
+ int handle;
+ int err;
+
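+    /* Resolve the ufid to the (prio, handle, netdev) triple that was
+     * recorded at flow_put time; a ufid that was never offloaded yields
+     * ENOENT. */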
+ handle = get_ufid_tc_mapping(ufid, &prio, &dev);
+ if (!handle) {
+ return ENOENT;
+ }
+
+ hook = get_tc_qdisc_hook(dev);
+
+ ifindex = netdev_get_ifindex(dev);
+ if (ifindex < 0) {
+ VLOG_ERR_RL(&error_rl, "flow_get: failed to get ifindex for %s: %s",
+ netdev_get_name(dev), ovs_strerror(-ifindex));
+ netdev_close(dev);
+ return -ifindex;
+ }
+
+ block_id = get_block_id_from_netdev(dev);
+ VLOG_DBG_RL(&rl, "flow get (dev %s prio %d handle %d block_id %d)",
+ netdev_get_name(dev), prio, handle, block_id);
+    err = tc_get_flower(ifindex, prio, handle, &flower, block_id, hook);
+    if (err) {
+        VLOG_ERR_RL(&error_rl,
+                    "flow get failed (dev %s prio %d handle %d): %s",
+                    netdev_get_name(dev), prio, handle, ovs_strerror(err));
+        netdev_close(dev);
+        return err;
+    }
+    netdev_close(dev);
+
+ in_port = netdev_ifindex_to_odp_port(ifindex);
+ parse_tc_flower_to_match(&flower, match, actions, stats, attrs, buf);
+
+ match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
+ match->flow.in_port.odp_port = in_port;
+
+ return 0;
+}
+
+static int
+netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
+ const ovs_u128 *ufid,
+ struct dpif_flow_stats *stats)
+{
+ struct tc_flower flower;
+ enum tc_qdisc_hook hook;
+ uint32_t block_id = 0;
+ struct netdev *dev;
+ int prio = 0;
+ int ifindex;
+ int handle;
+ int error;
+
+ handle = get_ufid_tc_mapping(ufid, &prio, &dev);
+ if (!handle) {
+ return ENOENT;
+ }
+
+ hook = get_tc_qdisc_hook(dev);
+
+ ifindex = netdev_get_ifindex(dev);
+ if (ifindex < 0) {
+ VLOG_ERR_RL(&error_rl, "flow_del: failed to get ifindex for %s: %s",
+ netdev_get_name(dev), ovs_strerror(-ifindex));
+ netdev_close(dev);
+ return -ifindex;
+ }
+
+ block_id = get_block_id_from_netdev(dev);
+
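+    /* Fetch the filter's counters before deleting it; the delete operation
+     * itself does not report statistics. */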
+ if (stats) {
+ memset(stats, 0, sizeof *stats);
+ if (!tc_get_flower(ifindex, prio, handle, &flower, block_id, hook)) {
+ stats->n_packets = get_32aligned_u64(&flower.stats.n_packets);
+ stats->n_bytes = get_32aligned_u64(&flower.stats.n_bytes);
+ stats->used = flower.lastused;
+ }
+ }
+
+ error = del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid,
+ hook);
+
+ netdev_close(dev);
+
+ return error;
+}
+
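+/* Probe whether the flower classifier accepts two filters with different
+ * masks at the same prio: on a temporary ingress qdisc, install a filter
+ * matching on dst_mac at prio 1, then a second filter at the same prio that
+ * also masks src_mac.  If the kernel accepts both, multi_mask_per_prio is
+ * set. */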
+static void
+probe_multi_mask_per_prio(int ifindex)
+{
+ struct tc_flower flower;
+    uint32_t block_id = 0;
+ int error;
+
+ error = tc_add_del_qdisc(ifindex, true, block_id, TC_INGRESS);
+ if (error) {
+ return;
+ }
+
+ memset(&flower, 0, sizeof flower);
+
+ flower.key.eth_type = htons(ETH_P_IP);
+ flower.mask.eth_type = OVS_BE16_MAX;
+ memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac);
+ memset(&flower.mask.dst_mac, 0xff, sizeof flower.mask.dst_mac);
+
+ error = tc_replace_flower(ifindex, 1, 1, &flower, block_id, TC_INGRESS);
+ if (error) {
+ goto out;
+ }
+
+ memset(&flower.key.src_mac, 0x11, sizeof flower.key.src_mac);
+ memset(&flower.mask.src_mac, 0xff, sizeof flower.mask.src_mac);
+
+ error = tc_replace_flower(ifindex, 1, 2, &flower, block_id, TC_INGRESS);
+ tc_del_filter(ifindex, 1, 1, block_id, TC_INGRESS);
+
+ if (error) {
+ goto out;
+ }
+
+ tc_del_filter(ifindex, 1, 2, block_id, TC_INGRESS);
+
+ multi_mask_per_prio = true;
+ VLOG_INFO("probe tc: multiple masks on single tc prio is supported.");
+
+out:
+ tc_add_del_qdisc(ifindex, false, block_id, TC_INGRESS);
+}
+
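+/* Probe whether the kernel supports shared tc blocks: try to add an ingress
+ * qdisc bound to block_id 1 and install a filter on it.  On success,
+ * block_support is set, and filters are subsequently managed per block
+ * rather than per port (see get_block_id_from_netdev()). */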
+static void
+probe_tc_block_support(int ifindex)
+{
+ struct tc_flower flower;
+ uint32_t block_id = 1;
+ int error;
+
+ error = tc_add_del_qdisc(ifindex, true, block_id, TC_INGRESS);
+ if (error) {
+ return;
+ }
+
+ memset(&flower, 0, sizeof flower);
+
+ flower.key.eth_type = htons(ETH_P_IP);
+ flower.mask.eth_type = OVS_BE16_MAX;
+ memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac);
+ memset(&flower.mask.dst_mac, 0xff, sizeof flower.mask.dst_mac);
+
+ error = tc_replace_flower(ifindex, 1, 1, &flower, block_id, TC_INGRESS);
+
+ tc_add_del_qdisc(ifindex, false, block_id, TC_INGRESS);
+
+ if (!error) {
+ block_support = true;
+ VLOG_INFO("probe tc: block offload is supported.");
+ }
+}
+
+static int
+netdev_tc_init_flow_api(struct netdev *netdev)
+{
+ static struct ovsthread_once multi_mask_once = OVSTHREAD_ONCE_INITIALIZER;
+ static struct ovsthread_once block_once = OVSTHREAD_ONCE_INITIALIZER;
+ enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
+ uint32_t block_id = 0;
+ int ifindex;
+ int error;
+
+ ifindex = netdev_get_ifindex(netdev);
+ if (ifindex < 0) {
+ VLOG_INFO("init: failed to get ifindex for %s: %s",
+ netdev_get_name(netdev), ovs_strerror(-ifindex));
+ return -ifindex;
+ }
+
+    /* Make sure there is no ingress qdisc. */
+ tc_add_del_qdisc(ifindex, false, 0, TC_INGRESS);
+
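+    /* The kernel capability probes below only need to run once; the first
+     * netdev that initializes the flow API performs them for all. */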
+ if (ovsthread_once_start(&block_once)) {
+ probe_tc_block_support(ifindex);
+ ovsthread_once_done(&block_once);
+ }
+
+ if (ovsthread_once_start(&multi_mask_once)) {
+ probe_multi_mask_per_prio(ifindex);
+ ovsthread_once_done(&multi_mask_once);
+ }
+
+ block_id = get_block_id_from_netdev(netdev);
+ error = tc_add_del_qdisc(ifindex, true, block_id, hook);
+
+ if (error && error != EEXIST) {
+ VLOG_INFO("failed adding ingress qdisc required for offloading: %s",
+ ovs_strerror(error));
+ return error;
+ }
+
+ VLOG_INFO("added ingress qdisc to %s", netdev_get_name(netdev));
+
+ return 0;
+}
+
+const struct netdev_flow_api netdev_offload_tc = {
+ .type = "linux_tc",
+ .flow_flush = netdev_tc_flow_flush,
+ .flow_dump_create = netdev_tc_flow_dump_create,
+ .flow_dump_destroy = netdev_tc_flow_dump_destroy,
+ .flow_dump_next = netdev_tc_flow_dump_next,
+ .flow_put = netdev_tc_flow_put,
+ .flow_get = netdev_tc_flow_get,
+ .flow_del = netdev_tc_flow_del,
+ .init_flow_api = netdev_tc_init_flow_api,
+};
+++ /dev/null
-/*
- * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
- * Copyright (c) 2019 Mellanox Technologies, Ltd.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <config.h>
-
-#include <rte_flow.h>
-
-#include "cmap.h"
-#include "dpif-netdev.h"
-#include "netdev-offload-provider.h"
-#include "netdev-provider.h"
-#include "openvswitch/match.h"
-#include "openvswitch/vlog.h"
-#include "packets.h"
-#include "uuid.h"
-
-VLOG_DEFINE_THIS_MODULE(netdev_rte_offloads);
-
-/* Thread-safety
- * =============
- *
- * Below API is NOT thread safe in following terms:
- *
- * - The caller must be sure that none of these functions will be called
- * simultaneously. Even for different 'netdev's.
- *
- * - The caller must be sure that 'netdev' will not be destructed/deallocated.
- *
- * - The caller must be sure that 'netdev' configuration will not be changed.
- * For example, simultaneous call of 'netdev_reconfigure()' for the same
- * 'netdev' is forbidden.
- *
- * For current implementation all above restrictions could be fulfilled by
- * taking the datapath 'port_mutex' in lib/dpif-netdev.c. */
-
-/*
- * A mapping from ufid to dpdk rte_flow.
- */
-static struct cmap ufid_to_rte_flow = CMAP_INITIALIZER;
-
-struct ufid_to_rte_flow_data {
- struct cmap_node node;
- ovs_u128 ufid;
- struct rte_flow *rte_flow;
-};
-
-/* Find rte_flow with @ufid. */
-static struct rte_flow *
-ufid_to_rte_flow_find(const ovs_u128 *ufid)
-{
- size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
- struct ufid_to_rte_flow_data *data;
-
- CMAP_FOR_EACH_WITH_HASH (data, node, hash, &ufid_to_rte_flow) {
- if (ovs_u128_equals(*ufid, data->ufid)) {
- return data->rte_flow;
- }
- }
-
- return NULL;
-}
-
-static inline void
-ufid_to_rte_flow_associate(const ovs_u128 *ufid,
- struct rte_flow *rte_flow)
-{
- size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
- struct ufid_to_rte_flow_data *data = xzalloc(sizeof *data);
-
- /*
- * We should not simply overwrite an existing rte flow.
- * We should have deleted it first before re-adding it.
- * Thus, if following assert triggers, something is wrong:
- * the rte_flow is not destroyed.
- */
- ovs_assert(ufid_to_rte_flow_find(ufid) == NULL);
-
- data->ufid = *ufid;
- data->rte_flow = rte_flow;
-
- cmap_insert(&ufid_to_rte_flow,
- CONST_CAST(struct cmap_node *, &data->node), hash);
-}
-
-static inline void
-ufid_to_rte_flow_disassociate(const ovs_u128 *ufid)
-{
- size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
- struct ufid_to_rte_flow_data *data;
-
- CMAP_FOR_EACH_WITH_HASH (data, node, hash, &ufid_to_rte_flow) {
- if (ovs_u128_equals(*ufid, data->ufid)) {
- cmap_remove(&ufid_to_rte_flow,
- CONST_CAST(struct cmap_node *, &data->node), hash);
- ovsrcu_postpone(free, data);
- return;
- }
- }
-
- VLOG_WARN("ufid "UUID_FMT" is not associated with an rte flow\n",
- UUID_ARGS((struct uuid *) ufid));
-}
-
-/*
- * To avoid individual xrealloc calls for each new element, a 'current_max'
- * is used to keep track of the current allocated number of elements. It
- * starts at 8 and doubles on each xrealloc call.
- */
-struct flow_patterns {
- struct rte_flow_item *items;
- int cnt;
- int current_max;
-};
-
-struct flow_actions {
- struct rte_flow_action *actions;
- int cnt;
- int current_max;
-};
-
-static void
-dump_flow_pattern(struct rte_flow_item *item)
-{
- struct ds s;
-
- if (!VLOG_IS_DBG_ENABLED() || item->type == RTE_FLOW_ITEM_TYPE_END) {
- return;
- }
-
- ds_init(&s);
-
- if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
- const struct rte_flow_item_eth *eth_spec = item->spec;
- const struct rte_flow_item_eth *eth_mask = item->mask;
-
- ds_put_cstr(&s, "rte flow eth pattern:\n");
- if (eth_spec) {
- ds_put_format(&s,
- " Spec: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
- "type=0x%04" PRIx16"\n",
- ETH_ADDR_BYTES_ARGS(eth_spec->src.addr_bytes),
- ETH_ADDR_BYTES_ARGS(eth_spec->dst.addr_bytes),
- ntohs(eth_spec->type));
- } else {
- ds_put_cstr(&s, " Spec = null\n");
- }
- if (eth_mask) {
- ds_put_format(&s,
- " Mask: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
- "type=0x%04"PRIx16"\n",
- ETH_ADDR_BYTES_ARGS(eth_mask->src.addr_bytes),
- ETH_ADDR_BYTES_ARGS(eth_mask->dst.addr_bytes),
- ntohs(eth_mask->type));
- } else {
- ds_put_cstr(&s, " Mask = null\n");
- }
- }
-
- if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- const struct rte_flow_item_vlan *vlan_spec = item->spec;
- const struct rte_flow_item_vlan *vlan_mask = item->mask;
-
- ds_put_cstr(&s, "rte flow vlan pattern:\n");
- if (vlan_spec) {
- ds_put_format(&s,
- " Spec: inner_type=0x%"PRIx16", tci=0x%"PRIx16"\n",
- ntohs(vlan_spec->inner_type), ntohs(vlan_spec->tci));
- } else {
- ds_put_cstr(&s, " Spec = null\n");
- }
-
- if (vlan_mask) {
- ds_put_format(&s,
- " Mask: inner_type=0x%"PRIx16", tci=0x%"PRIx16"\n",
- ntohs(vlan_mask->inner_type), ntohs(vlan_mask->tci));
- } else {
- ds_put_cstr(&s, " Mask = null\n");
- }
- }
-
- if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
- const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
- const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
-
- ds_put_cstr(&s, "rte flow ipv4 pattern:\n");
- if (ipv4_spec) {
- ds_put_format(&s,
- " Spec: tos=0x%"PRIx8", ttl=%"PRIx8
- ", proto=0x%"PRIx8
- ", src="IP_FMT", dst="IP_FMT"\n",
- ipv4_spec->hdr.type_of_service,
- ipv4_spec->hdr.time_to_live,
- ipv4_spec->hdr.next_proto_id,
- IP_ARGS(ipv4_spec->hdr.src_addr),
- IP_ARGS(ipv4_spec->hdr.dst_addr));
- } else {
- ds_put_cstr(&s, " Spec = null\n");
- }
- if (ipv4_mask) {
- ds_put_format(&s,
- " Mask: tos=0x%"PRIx8", ttl=%"PRIx8
- ", proto=0x%"PRIx8
- ", src="IP_FMT", dst="IP_FMT"\n",
- ipv4_mask->hdr.type_of_service,
- ipv4_mask->hdr.time_to_live,
- ipv4_mask->hdr.next_proto_id,
- IP_ARGS(ipv4_mask->hdr.src_addr),
- IP_ARGS(ipv4_mask->hdr.dst_addr));
- } else {
- ds_put_cstr(&s, " Mask = null\n");
- }
- }
-
- if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
- const struct rte_flow_item_udp *udp_spec = item->spec;
- const struct rte_flow_item_udp *udp_mask = item->mask;
-
- ds_put_cstr(&s, "rte flow udp pattern:\n");
- if (udp_spec) {
- ds_put_format(&s,
- " Spec: src_port=%"PRIu16", dst_port=%"PRIu16"\n",
- ntohs(udp_spec->hdr.src_port),
- ntohs(udp_spec->hdr.dst_port));
- } else {
- ds_put_cstr(&s, " Spec = null\n");
- }
- if (udp_mask) {
- ds_put_format(&s,
- " Mask: src_port=0x%"PRIx16
- ", dst_port=0x%"PRIx16"\n",
- ntohs(udp_mask->hdr.src_port),
- ntohs(udp_mask->hdr.dst_port));
- } else {
- ds_put_cstr(&s, " Mask = null\n");
- }
- }
-
- if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
- const struct rte_flow_item_sctp *sctp_spec = item->spec;
- const struct rte_flow_item_sctp *sctp_mask = item->mask;
-
- ds_put_cstr(&s, "rte flow sctp pattern:\n");
- if (sctp_spec) {
- ds_put_format(&s,
- " Spec: src_port=%"PRIu16", dst_port=%"PRIu16"\n",
- ntohs(sctp_spec->hdr.src_port),
- ntohs(sctp_spec->hdr.dst_port));
- } else {
- ds_put_cstr(&s, " Spec = null\n");
- }
- if (sctp_mask) {
- ds_put_format(&s,
- " Mask: src_port=0x%"PRIx16
- ", dst_port=0x%"PRIx16"\n",
- ntohs(sctp_mask->hdr.src_port),
- ntohs(sctp_mask->hdr.dst_port));
- } else {
- ds_put_cstr(&s, " Mask = null\n");
- }
- }
-
- if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
- const struct rte_flow_item_icmp *icmp_spec = item->spec;
- const struct rte_flow_item_icmp *icmp_mask = item->mask;
-
- ds_put_cstr(&s, "rte flow icmp pattern:\n");
- if (icmp_spec) {
- ds_put_format(&s,
- " Spec: icmp_type=%"PRIu8", icmp_code=%"PRIu8"\n",
- icmp_spec->hdr.icmp_type,
- icmp_spec->hdr.icmp_code);
- } else {
- ds_put_cstr(&s, " Spec = null\n");
- }
- if (icmp_mask) {
- ds_put_format(&s,
- " Mask: icmp_type=0x%"PRIx8
- ", icmp_code=0x%"PRIx8"\n",
-                          icmp_mask->hdr.icmp_type,
-                          icmp_mask->hdr.icmp_code);
- } else {
- ds_put_cstr(&s, " Mask = null\n");
- }
- }
-
- if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
- const struct rte_flow_item_tcp *tcp_spec = item->spec;
- const struct rte_flow_item_tcp *tcp_mask = item->mask;
-
- ds_put_cstr(&s, "rte flow tcp pattern:\n");
- if (tcp_spec) {
- ds_put_format(&s,
- " Spec: src_port=%"PRIu16", dst_port=%"PRIu16
- ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8"\n",
- ntohs(tcp_spec->hdr.src_port),
- ntohs(tcp_spec->hdr.dst_port),
- tcp_spec->hdr.data_off,
- tcp_spec->hdr.tcp_flags);
- } else {
- ds_put_cstr(&s, " Spec = null\n");
- }
- if (tcp_mask) {
- ds_put_format(&s,
- " Mask: src_port=%"PRIx16", dst_port=%"PRIx16
- ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8"\n",
- ntohs(tcp_mask->hdr.src_port),
- ntohs(tcp_mask->hdr.dst_port),
- tcp_mask->hdr.data_off,
- tcp_mask->hdr.tcp_flags);
- } else {
- ds_put_cstr(&s, " Mask = null\n");
- }
- }
-
- VLOG_DBG("%s", ds_cstr(&s));
- ds_destroy(&s);
-}
-
-static void
-add_flow_pattern(struct flow_patterns *patterns, enum rte_flow_item_type type,
- const void *spec, const void *mask)
-{
- int cnt = patterns->cnt;
-
- if (cnt == 0) {
- patterns->current_max = 8;
- patterns->items = xcalloc(patterns->current_max,
- sizeof *patterns->items);
- } else if (cnt == patterns->current_max) {
- patterns->current_max *= 2;
- patterns->items = xrealloc(patterns->items, patterns->current_max *
- sizeof *patterns->items);
- }
-
- patterns->items[cnt].type = type;
- patterns->items[cnt].spec = spec;
- patterns->items[cnt].mask = mask;
- patterns->items[cnt].last = NULL;
- dump_flow_pattern(&patterns->items[cnt]);
- patterns->cnt++;
-}
-
-static void
-add_flow_action(struct flow_actions *actions, enum rte_flow_action_type type,
- const void *conf)
-{
- int cnt = actions->cnt;
-
- if (cnt == 0) {
- actions->current_max = 8;
- actions->actions = xcalloc(actions->current_max,
- sizeof *actions->actions);
- } else if (cnt == actions->current_max) {
- actions->current_max *= 2;
- actions->actions = xrealloc(actions->actions, actions->current_max *
- sizeof *actions->actions);
- }
-
- actions->actions[cnt].type = type;
- actions->actions[cnt].conf = conf;
- actions->cnt++;
-}
-
-struct action_rss_data {
- struct rte_flow_action_rss conf;
- uint16_t queue[0];
-};
-
-static struct action_rss_data *
-add_flow_rss_action(struct flow_actions *actions,
- struct netdev *netdev)
-{
- int i;
- struct action_rss_data *rss_data;
-
- rss_data = xmalloc(sizeof *rss_data +
- netdev_n_rxq(netdev) * sizeof rss_data->queue[0]);
- *rss_data = (struct action_rss_data) {
- .conf = (struct rte_flow_action_rss) {
- .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
- .level = 0,
- .types = 0,
- .queue_num = netdev_n_rxq(netdev),
- .queue = rss_data->queue,
- .key_len = 0,
- .key = NULL
- },
- };
-
- /* Override queue array with default. */
- for (i = 0; i < netdev_n_rxq(netdev); i++) {
- rss_data->queue[i] = i;
- }
-
- add_flow_action(actions, RTE_FLOW_ACTION_TYPE_RSS, &rss_data->conf);
-
- return rss_data;
-}
-
-static int
-netdev_rte_offloads_add_flow(struct netdev *netdev,
- const struct match *match,
- struct nlattr *nl_actions OVS_UNUSED,
- size_t actions_len OVS_UNUSED,
- const ovs_u128 *ufid,
- struct offload_info *info)
-{
- const struct rte_flow_attr flow_attr = {
- .group = 0,
- .priority = 0,
- .ingress = 1,
- .egress = 0
- };
- struct flow_patterns patterns = { .items = NULL, .cnt = 0 };
- struct flow_actions actions = { .actions = NULL, .cnt = 0 };
- struct rte_flow *flow;
- struct rte_flow_error error;
- uint8_t proto = 0;
- int ret = 0;
- struct flow_items {
- struct rte_flow_item_eth eth;
- struct rte_flow_item_vlan vlan;
- struct rte_flow_item_ipv4 ipv4;
- union {
- struct rte_flow_item_tcp tcp;
- struct rte_flow_item_udp udp;
- struct rte_flow_item_sctp sctp;
- struct rte_flow_item_icmp icmp;
- };
- } spec, mask;
-
- memset(&spec, 0, sizeof spec);
- memset(&mask, 0, sizeof mask);
-
- /* Eth */
- if (!eth_addr_is_zero(match->wc.masks.dl_src) ||
- !eth_addr_is_zero(match->wc.masks.dl_dst)) {
- memcpy(&spec.eth.dst, &match->flow.dl_dst, sizeof spec.eth.dst);
- memcpy(&spec.eth.src, &match->flow.dl_src, sizeof spec.eth.src);
- spec.eth.type = match->flow.dl_type;
-
- memcpy(&mask.eth.dst, &match->wc.masks.dl_dst, sizeof mask.eth.dst);
- memcpy(&mask.eth.src, &match->wc.masks.dl_src, sizeof mask.eth.src);
- mask.eth.type = match->wc.masks.dl_type;
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ETH,
- &spec.eth, &mask.eth);
- } else {
- /*
- * If user specifies a flow (like UDP flow) without L2 patterns,
- * OVS will at least set the dl_type. Normally, it's enough to
-         * create an eth pattern just with it. Unfortunately, some Intel
-         * NICs (such as the XL710) don't support that. Below is a
-         * workaround, which simply matches any L2 packets.
- */
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ETH, NULL, NULL);
- }
-
- /* VLAN */
- if (match->wc.masks.vlans[0].tci && match->flow.vlans[0].tci) {
- spec.vlan.tci = match->flow.vlans[0].tci & ~htons(VLAN_CFI);
- mask.vlan.tci = match->wc.masks.vlans[0].tci & ~htons(VLAN_CFI);
-
- /* Match any protocols. */
- mask.vlan.inner_type = 0;
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_VLAN,
- &spec.vlan, &mask.vlan);
- }
-
- /* IP v4 */
- if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
- spec.ipv4.hdr.type_of_service = match->flow.nw_tos;
- spec.ipv4.hdr.time_to_live = match->flow.nw_ttl;
- spec.ipv4.hdr.next_proto_id = match->flow.nw_proto;
- spec.ipv4.hdr.src_addr = match->flow.nw_src;
- spec.ipv4.hdr.dst_addr = match->flow.nw_dst;
-
- mask.ipv4.hdr.type_of_service = match->wc.masks.nw_tos;
- mask.ipv4.hdr.time_to_live = match->wc.masks.nw_ttl;
- mask.ipv4.hdr.next_proto_id = match->wc.masks.nw_proto;
- mask.ipv4.hdr.src_addr = match->wc.masks.nw_src;
- mask.ipv4.hdr.dst_addr = match->wc.masks.nw_dst;
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_IPV4,
- &spec.ipv4, &mask.ipv4);
-
- /* Save proto for L4 protocol setup. */
- proto = spec.ipv4.hdr.next_proto_id &
- mask.ipv4.hdr.next_proto_id;
- }
-
- if (proto != IPPROTO_ICMP && proto != IPPROTO_UDP &&
- proto != IPPROTO_SCTP && proto != IPPROTO_TCP &&
- (match->wc.masks.tp_src ||
- match->wc.masks.tp_dst ||
- match->wc.masks.tcp_flags)) {
- VLOG_DBG("L4 Protocol (%u) not supported", proto);
- ret = -1;
- goto out;
- }
-
- if ((match->wc.masks.tp_src && match->wc.masks.tp_src != OVS_BE16_MAX) ||
- (match->wc.masks.tp_dst && match->wc.masks.tp_dst != OVS_BE16_MAX)) {
- ret = -1;
- goto out;
- }
-
- switch (proto) {
- case IPPROTO_TCP:
- spec.tcp.hdr.src_port = match->flow.tp_src;
- spec.tcp.hdr.dst_port = match->flow.tp_dst;
- spec.tcp.hdr.data_off = ntohs(match->flow.tcp_flags) >> 8;
- spec.tcp.hdr.tcp_flags = ntohs(match->flow.tcp_flags) & 0xff;
-
- mask.tcp.hdr.src_port = match->wc.masks.tp_src;
- mask.tcp.hdr.dst_port = match->wc.masks.tp_dst;
- mask.tcp.hdr.data_off = ntohs(match->wc.masks.tcp_flags) >> 8;
- mask.tcp.hdr.tcp_flags = ntohs(match->wc.masks.tcp_flags) & 0xff;
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_TCP,
- &spec.tcp, &mask.tcp);
-
- /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match. */
- mask.ipv4.hdr.next_proto_id = 0;
- break;
-
- case IPPROTO_UDP:
- spec.udp.hdr.src_port = match->flow.tp_src;
- spec.udp.hdr.dst_port = match->flow.tp_dst;
-
- mask.udp.hdr.src_port = match->wc.masks.tp_src;
- mask.udp.hdr.dst_port = match->wc.masks.tp_dst;
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_UDP,
- &spec.udp, &mask.udp);
-
- /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match. */
- mask.ipv4.hdr.next_proto_id = 0;
- break;
-
- case IPPROTO_SCTP:
- spec.sctp.hdr.src_port = match->flow.tp_src;
- spec.sctp.hdr.dst_port = match->flow.tp_dst;
-
- mask.sctp.hdr.src_port = match->wc.masks.tp_src;
- mask.sctp.hdr.dst_port = match->wc.masks.tp_dst;
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_SCTP,
- &spec.sctp, &mask.sctp);
-
- /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match. */
- mask.ipv4.hdr.next_proto_id = 0;
- break;
-
- case IPPROTO_ICMP:
- spec.icmp.hdr.icmp_type = (uint8_t) ntohs(match->flow.tp_src);
- spec.icmp.hdr.icmp_code = (uint8_t) ntohs(match->flow.tp_dst);
-
- mask.icmp.hdr.icmp_type = (uint8_t) ntohs(match->wc.masks.tp_src);
- mask.icmp.hdr.icmp_code = (uint8_t) ntohs(match->wc.masks.tp_dst);
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_ICMP,
- &spec.icmp, &mask.icmp);
-
- /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match. */
- mask.ipv4.hdr.next_proto_id = 0;
- break;
- }
-
- add_flow_pattern(&patterns, RTE_FLOW_ITEM_TYPE_END, NULL, NULL);
-
- struct rte_flow_action_mark mark;
- struct action_rss_data *rss;
-
- mark.id = info->flow_mark;
- add_flow_action(&actions, RTE_FLOW_ACTION_TYPE_MARK, &mark);
-
- rss = add_flow_rss_action(&actions, netdev);
- add_flow_action(&actions, RTE_FLOW_ACTION_TYPE_END, NULL);
-
- flow = netdev_dpdk_rte_flow_create(netdev, &flow_attr,
- patterns.items,
- actions.actions, &error);
-
- free(rss);
- if (!flow) {
- VLOG_ERR("%s: rte flow creat error: %u : message : %s\n",
- netdev_get_name(netdev), error.type, error.message);
- ret = -1;
- goto out;
- }
- ufid_to_rte_flow_associate(ufid, flow);
- VLOG_DBG("%s: installed flow %p by ufid "UUID_FMT"\n",
- netdev_get_name(netdev), flow, UUID_ARGS((struct uuid *)ufid));
-
-out:
- free(patterns.items);
- free(actions.actions);
- return ret;
-}
-
-/*
- * Check if any unsupported flow patterns are specified.
- */
-static int
-netdev_rte_offloads_validate_flow(const struct match *match)
-{
- struct match match_zero_wc;
- const struct flow *masks = &match->wc.masks;
-
- /* Create a wc-zeroed version of flow. */
- match_init(&match_zero_wc, &match->flow, &match->wc);
-
- if (!is_all_zeros(&match_zero_wc.flow.tunnel,
- sizeof match_zero_wc.flow.tunnel)) {
- goto err;
- }
-
- if (masks->metadata || masks->skb_priority ||
- masks->pkt_mark || masks->dp_hash) {
- goto err;
- }
-
- /* recirc id must be zero. */
- if (match_zero_wc.flow.recirc_id) {
- goto err;
- }
-
- if (masks->ct_state || masks->ct_nw_proto ||
- masks->ct_zone || masks->ct_mark ||
- !ovs_u128_is_zero(masks->ct_label)) {
- goto err;
- }
-
- if (masks->conj_id || masks->actset_output) {
- goto err;
- }
-
- /* Unsupported L2. */
- if (!is_all_zeros(masks->mpls_lse, sizeof masks->mpls_lse)) {
- goto err;
- }
-
- /* Unsupported L3. */
- if (masks->ipv6_label || masks->ct_nw_src || masks->ct_nw_dst ||
- !is_all_zeros(&masks->ipv6_src, sizeof masks->ipv6_src) ||
- !is_all_zeros(&masks->ipv6_dst, sizeof masks->ipv6_dst) ||
- !is_all_zeros(&masks->ct_ipv6_src, sizeof masks->ct_ipv6_src) ||
- !is_all_zeros(&masks->ct_ipv6_dst, sizeof masks->ct_ipv6_dst) ||
- !is_all_zeros(&masks->nd_target, sizeof masks->nd_target) ||
- !is_all_zeros(&masks->nsh, sizeof masks->nsh) ||
- !is_all_zeros(&masks->arp_sha, sizeof masks->arp_sha) ||
- !is_all_zeros(&masks->arp_tha, sizeof masks->arp_tha)) {
- goto err;
- }
-
- /* If fragmented, then don't HW accelerate - for now. */
- if (match_zero_wc.flow.nw_frag) {
- goto err;
- }
-
- /* Unsupported L4. */
- if (masks->igmp_group_ip4 || masks->ct_tp_src || masks->ct_tp_dst) {
- goto err;
- }
-
- return 0;
-
-err:
- VLOG_ERR("cannot HW accelerate this flow due to unsupported protocols");
- return -1;
-}
-
-static int
-netdev_rte_offloads_destroy_flow(struct netdev *netdev,
- const ovs_u128 *ufid,
- struct rte_flow *rte_flow)
-{
- struct rte_flow_error error;
- int ret = netdev_dpdk_rte_flow_destroy(netdev, rte_flow, &error);
-
- if (ret == 0) {
- ufid_to_rte_flow_disassociate(ufid);
- VLOG_DBG("%s: removed rte flow %p associated with ufid " UUID_FMT "\n",
- netdev_get_name(netdev), rte_flow,
- UUID_ARGS((struct uuid *)ufid));
- } else {
- VLOG_ERR("%s: rte flow destroy error: %u : message : %s\n",
- netdev_get_name(netdev), error.type, error.message);
- }
-
- return ret;
-}
-
-static int
-netdev_rte_offloads_flow_put(struct netdev *netdev, struct match *match,
- struct nlattr *actions, size_t actions_len,
- const ovs_u128 *ufid, struct offload_info *info,
- struct dpif_flow_stats *stats OVS_UNUSED)
-{
- struct rte_flow *rte_flow;
- int ret;
-
- /*
- * If an old rte_flow exists, it means it's a flow modification.
- * Here destroy the old rte flow first before adding a new one.
- */
- rte_flow = ufid_to_rte_flow_find(ufid);
- if (rte_flow) {
- ret = netdev_rte_offloads_destroy_flow(netdev, ufid, rte_flow);
- if (ret < 0) {
- return ret;
- }
- }
-
- ret = netdev_rte_offloads_validate_flow(match);
- if (ret < 0) {
- return ret;
- }
-
- return netdev_rte_offloads_add_flow(netdev, match, actions,
- actions_len, ufid, info);
-}
-
-static int
-netdev_rte_offloads_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
- struct dpif_flow_stats *stats OVS_UNUSED)
-{
- struct rte_flow *rte_flow = ufid_to_rte_flow_find(ufid);
-
- if (!rte_flow) {
- return -1;
- }
-
- return netdev_rte_offloads_destroy_flow(netdev, ufid, rte_flow);
-}
-
-static int
-netdev_rte_offloads_init_flow_api(struct netdev *netdev)
-{
- return netdev_dpdk_flow_api_supported(netdev) ? 0 : EOPNOTSUPP;
-}
-
-const struct netdev_flow_api netdev_dpdk_offloads = {
- .type = "dpdk_flow_api",
- .flow_put = netdev_rte_offloads_flow_put,
- .flow_del = netdev_rte_offloads_flow_del,
- .init_flow_api = netdev_rte_offloads_init_flow_api,
-};
+++ /dev/null
-/*
- * Copyright (c) 2016 Mellanox Technologies, Ltd.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <config.h>
-
-#include <errno.h>
-#include <linux/if_ether.h>
-
-#include "dpif.h"
-#include "hash.h"
-#include "openvswitch/hmap.h"
-#include "openvswitch/match.h"
-#include "openvswitch/ofpbuf.h"
-#include "openvswitch/thread.h"
-#include "openvswitch/types.h"
-#include "openvswitch/util.h"
-#include "openvswitch/vlog.h"
-#include "netdev-linux.h"
-#include "netdev-offload-provider.h"
-#include "netdev-provider.h"
-#include "netlink.h"
-#include "netlink-socket.h"
-#include "odp-netlink.h"
-#include "odp-util.h"
-#include "tc.h"
-#include "unaligned.h"
-#include "util.h"
-
-VLOG_DEFINE_THIS_MODULE(netdev_tc_offloads);
-
-static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
-
-static struct hmap ufid_tc = HMAP_INITIALIZER(&ufid_tc);
-static bool multi_mask_per_prio = false;
-static bool block_support = false;
-
-struct netlink_field {
- int offset;
- int flower_offset;
- int size;
-};
-
-static bool
-is_internal_port(const char *type)
-{
- return !strcmp(type, "internal");
-}
-
-static enum tc_qdisc_hook
-get_tc_qdisc_hook(struct netdev *netdev)
-{
- return is_internal_port(netdev_get_type(netdev)) ? TC_EGRESS : TC_INGRESS;
-}
-
-static struct netlink_field set_flower_map[][4] = {
- [OVS_KEY_ATTR_IPV4] = {
- { offsetof(struct ovs_key_ipv4, ipv4_src),
- offsetof(struct tc_flower_key, ipv4.ipv4_src),
- MEMBER_SIZEOF(struct tc_flower_key, ipv4.ipv4_src)
- },
- { offsetof(struct ovs_key_ipv4, ipv4_dst),
- offsetof(struct tc_flower_key, ipv4.ipv4_dst),
- MEMBER_SIZEOF(struct tc_flower_key, ipv4.ipv4_dst)
- },
- { offsetof(struct ovs_key_ipv4, ipv4_ttl),
- offsetof(struct tc_flower_key, ipv4.rewrite_ttl),
- MEMBER_SIZEOF(struct tc_flower_key, ipv4.rewrite_ttl)
- },
- { offsetof(struct ovs_key_ipv4, ipv4_tos),
- offsetof(struct tc_flower_key, ipv4.rewrite_tos),
- MEMBER_SIZEOF(struct tc_flower_key, ipv4.rewrite_tos)
- },
- },
- [OVS_KEY_ATTR_IPV6] = {
- { offsetof(struct ovs_key_ipv6, ipv6_src),
- offsetof(struct tc_flower_key, ipv6.ipv6_src),
- MEMBER_SIZEOF(struct tc_flower_key, ipv6.ipv6_src)
- },
- { offsetof(struct ovs_key_ipv6, ipv6_dst),
- offsetof(struct tc_flower_key, ipv6.ipv6_dst),
- MEMBER_SIZEOF(struct tc_flower_key, ipv6.ipv6_dst)
- },
- { offsetof(struct ovs_key_ipv6, ipv6_hlimit),
- offsetof(struct tc_flower_key, ipv6.rewrite_hlimit),
- MEMBER_SIZEOF(struct tc_flower_key, ipv6.rewrite_hlimit)
- },
- { offsetof(struct ovs_key_ipv6, ipv6_tclass),
- offsetof(struct tc_flower_key, ipv6.rewrite_tclass),
- MEMBER_SIZEOF(struct tc_flower_key, ipv6.rewrite_tclass)
- },
- },
- [OVS_KEY_ATTR_ETHERNET] = {
- { offsetof(struct ovs_key_ethernet, eth_src),
- offsetof(struct tc_flower_key, src_mac),
- MEMBER_SIZEOF(struct tc_flower_key, src_mac)
- },
- { offsetof(struct ovs_key_ethernet, eth_dst),
- offsetof(struct tc_flower_key, dst_mac),
- MEMBER_SIZEOF(struct tc_flower_key, dst_mac)
- },
- },
- [OVS_KEY_ATTR_ETHERTYPE] = {
- { 0,
- offsetof(struct tc_flower_key, eth_type),
- MEMBER_SIZEOF(struct tc_flower_key, eth_type)
- },
- },
- [OVS_KEY_ATTR_TCP] = {
- { offsetof(struct ovs_key_tcp, tcp_src),
- offsetof(struct tc_flower_key, tcp_src),
- MEMBER_SIZEOF(struct tc_flower_key, tcp_src)
- },
- { offsetof(struct ovs_key_tcp, tcp_dst),
- offsetof(struct tc_flower_key, tcp_dst),
- MEMBER_SIZEOF(struct tc_flower_key, tcp_dst)
- },
- },
- [OVS_KEY_ATTR_UDP] = {
- { offsetof(struct ovs_key_udp, udp_src),
- offsetof(struct tc_flower_key, udp_src),
- MEMBER_SIZEOF(struct tc_flower_key, udp_src)
- },
- { offsetof(struct ovs_key_udp, udp_dst),
- offsetof(struct tc_flower_key, udp_dst),
- MEMBER_SIZEOF(struct tc_flower_key, udp_dst)
- },
- },
-};
-
-static struct ovs_mutex ufid_lock = OVS_MUTEX_INITIALIZER;
-
-/**
- * struct ufid_tc_data - data entry for ufid_tc hmap.
- * @ufid_node: Element in @ufid_tc hash table by ufid key.
- * @tc_node: Element in @ufid_tc hash table by prio/handle/ifindex key.
- * @ufid: ufid assigned to the flow
- * @prio: tc priority
- * @handle: tc handle
- * @ifindex: netdev ifindex.
- * @netdev: netdev associated with the tc rule
- */
-struct ufid_tc_data {
- struct hmap_node ufid_node;
- struct hmap_node tc_node;
- ovs_u128 ufid;
- uint16_t prio;
- uint32_t handle;
- int ifindex;
- struct netdev *netdev;
-};
-
-/* Remove matching ufid entry from ufid_tc hashmap. */
-static void
-del_ufid_tc_mapping(const ovs_u128 *ufid)
-{
- size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
- struct ufid_tc_data *data;
-
- ovs_mutex_lock(&ufid_lock);
- HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
- if (ovs_u128_equals(*ufid, data->ufid)) {
- break;
- }
- }
-
- if (!data) {
- ovs_mutex_unlock(&ufid_lock);
- return;
- }
-
- hmap_remove(&ufid_tc, &data->ufid_node);
- hmap_remove(&ufid_tc, &data->tc_node);
- netdev_close(data->netdev);
- free(data);
- ovs_mutex_unlock(&ufid_lock);
-}
-
-/* Wrapper function to delete filter and ufid tc mapping */
-static int
-del_filter_and_ufid_mapping(int ifindex, int prio, int handle,
- uint32_t block_id, const ovs_u128 *ufid,
- enum tc_qdisc_hook hook)
-{
- int err;
-
- err = tc_del_filter(ifindex, prio, handle, block_id, hook);
- del_ufid_tc_mapping(ufid);
-
- return err;
-}
-
-/* Add ufid entry to ufid_tc hashmap. */
-static void
-add_ufid_tc_mapping(const ovs_u128 *ufid, int prio, int handle,
- struct netdev *netdev, int ifindex)
-{
- size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
- size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);
- struct ufid_tc_data *new_data = xzalloc(sizeof *new_data);
-
- new_data->ufid = *ufid;
- new_data->prio = prio;
- new_data->handle = handle;
- new_data->netdev = netdev_ref(netdev);
- new_data->ifindex = ifindex;
-
- ovs_mutex_lock(&ufid_lock);
- hmap_insert(&ufid_tc, &new_data->ufid_node, ufid_hash);
- hmap_insert(&ufid_tc, &new_data->tc_node, tc_hash);
- ovs_mutex_unlock(&ufid_lock);
-}
-
-/* Get ufid from ufid_tc hashmap.
- *
- * If netdev output param is not NULL then the function will return
- * associated netdev on success and a refcount is taken on that netdev.
- * The caller is then responsible to close the netdev.
- *
- * Returns handle if successful and fill prio and netdev for that ufid.
- * Otherwise returns 0.
- */
-static int
-get_ufid_tc_mapping(const ovs_u128 *ufid, int *prio, struct netdev **netdev)
-{
- size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
- struct ufid_tc_data *data;
- int handle = 0;
-
- ovs_mutex_lock(&ufid_lock);
- HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
- if (ovs_u128_equals(*ufid, data->ufid)) {
- if (prio) {
- *prio = data->prio;
- }
- if (netdev) {
- *netdev = netdev_ref(data->netdev);
- }
- handle = data->handle;
- break;
- }
- }
- ovs_mutex_unlock(&ufid_lock);
-
- return handle;
-}
-
-/* Find ufid entry in ufid_tc hashmap using prio, handle and netdev.
- * The result is saved in ufid.
- *
- * Returns true on success.
- */
-static bool
-find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
-{
- int ifindex = netdev_get_ifindex(netdev);
- struct ufid_tc_data *data;
- size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);
-
- ovs_mutex_lock(&ufid_lock);
- HMAP_FOR_EACH_WITH_HASH(data, tc_node, tc_hash, &ufid_tc) {
- if (data->prio == prio && data->handle == handle
- && data->ifindex == ifindex) {
- *ufid = data->ufid;
- break;
- }
- }
- ovs_mutex_unlock(&ufid_lock);
-
- return (data != NULL);
-}
-
-struct prio_map_data {
- struct hmap_node node;
- struct tc_flower_key mask;
- ovs_be16 protocol;
- uint16_t prio;
-};
-
-/* Get free prio for tc flower
- * If prio is already allocated for mask/eth_type combination then return it.
- * If not assign new prio.
- *
- * Return prio on success or 0 if we are out of prios.
- */
-static uint16_t
-get_prio_for_tc_flower(struct tc_flower *flower)
-{
- static struct hmap prios = HMAP_INITIALIZER(&prios);
- static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
- static uint16_t last_prio = TC_RESERVED_PRIORITY_MAX;
- size_t key_len = sizeof(struct tc_flower_key);
- size_t hash = hash_int((OVS_FORCE uint32_t) flower->key.eth_type, 0);
- struct prio_map_data *data;
- struct prio_map_data *new_data;
-
- if (!multi_mask_per_prio) {
- hash = hash_bytes(&flower->mask, key_len, hash);
- }
-
- /* We can use the same prio for same mask/eth combination but must have
- * different prio if not. Flower classifier will reject same prio for
- * different mask combination unless multi mask per prio is supported. */
- ovs_mutex_lock(&prios_lock);
- HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
- if ((multi_mask_per_prio
- || !memcmp(&flower->mask, &data->mask, key_len))
- && data->protocol == flower->key.eth_type) {
- ovs_mutex_unlock(&prios_lock);
- return data->prio;
- }
- }
-
- if (last_prio == UINT16_MAX) {
-        /* last_prio can overflow if there are many different kinds of
-         * flows, which shouldn't happen organically. */
- ovs_mutex_unlock(&prios_lock);
- return 0;
- }
-
- new_data = xzalloc(sizeof *new_data);
- memcpy(&new_data->mask, &flower->mask, key_len);
- new_data->prio = ++last_prio;
- new_data->protocol = flower->key.eth_type;
- hmap_insert(&prios, &new_data->node, hash);
- ovs_mutex_unlock(&prios_lock);
-
- return new_data->prio;
-}
-
-static uint32_t
-get_block_id_from_netdev(struct netdev *netdev)
-{
- if (block_support) {
- return netdev_get_block_id(netdev);
- }
-
- return 0;
-}
-
-static int
-netdev_tc_flow_flush(struct netdev *netdev)
-{
- enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
- int ifindex = netdev_get_ifindex(netdev);
- uint32_t block_id = 0;
-
- if (ifindex < 0) {
- VLOG_ERR_RL(&error_rl, "flow_flush: failed to get ifindex for %s: %s",
- netdev_get_name(netdev), ovs_strerror(-ifindex));
- return -ifindex;
- }
-
- block_id = get_block_id_from_netdev(netdev);
-
- return tc_flush(ifindex, block_id, hook);
-}
-
-static int
-netdev_tc_flow_dump_create(struct netdev *netdev,
- struct netdev_flow_dump **dump_out)
-{
- enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
- struct netdev_flow_dump *dump;
- uint32_t block_id = 0;
- int ifindex;
-
- ifindex = netdev_get_ifindex(netdev);
- if (ifindex < 0) {
- VLOG_ERR_RL(&error_rl, "dump_create: failed to get ifindex for %s: %s",
- netdev_get_name(netdev), ovs_strerror(-ifindex));
- return -ifindex;
- }
-
- block_id = get_block_id_from_netdev(netdev);
- dump = xzalloc(sizeof *dump);
- dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
- dump->netdev = netdev_ref(netdev);
- tc_dump_flower_start(ifindex, dump->nl_dump, block_id, hook);
-
- *dump_out = dump;
-
- return 0;
-}
-
-static int
-netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
-{
- nl_dump_done(dump->nl_dump);
- netdev_close(dump->netdev);
- free(dump->nl_dump);
- free(dump);
- return 0;
-}
-
-static void
-parse_flower_rewrite_to_netlink_action(struct ofpbuf *buf,
- struct tc_flower *flower)
-{
- char *mask = (char *) &flower->rewrite.mask;
- char *data = (char *) &flower->rewrite.key;
-
- for (int type = 0; type < ARRAY_SIZE(set_flower_map); type++) {
- char *put = NULL;
- size_t nested = 0;
- int len = ovs_flow_key_attr_lens[type].len;
-
- if (len <= 0) {
- continue;
- }
-
- for (int j = 0; j < ARRAY_SIZE(set_flower_map[type]); j++) {
- struct netlink_field *f = &set_flower_map[type][j];
-
- if (!f->size) {
- break;
- }
-
- if (!is_all_zeros(mask + f->flower_offset, f->size)) {
- if (!put) {
- nested = nl_msg_start_nested(buf,
- OVS_ACTION_ATTR_SET_MASKED);
- put = nl_msg_put_unspec_zero(buf, type, len * 2);
- }
-
- memcpy(put + f->offset, data + f->flower_offset, f->size);
- memcpy(put + len + f->offset,
- mask + f->flower_offset, f->size);
- }
- }
-
- if (put) {
- nl_msg_end_nested(buf, nested);
- }
- }
-}
-
-static void parse_tc_flower_geneve_opts(struct tc_action *action,
- struct ofpbuf *buf)
-{
- int tun_opt_len = action->encap.data.present.len;
- size_t geneve_off;
- int idx = 0;
-
- if (!tun_opt_len) {
- return;
- }
-
- geneve_off = nl_msg_start_nested(buf, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);
- while (tun_opt_len) {
- struct geneve_opt *opt;
-
- opt = &action->encap.data.opts.gnv[idx];
- nl_msg_put(buf, opt, sizeof(struct geneve_opt) + opt->length * 4);
- idx += sizeof(struct geneve_opt) / 4 + opt->length;
- tun_opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
- }
- nl_msg_end_nested(buf, geneve_off);
-}
-
-static void
-flower_tun_opt_to_match(struct match *match, struct tc_flower *flower)
-{
- struct geneve_opt *opt, *opt_mask;
- int len, cnt = 0;
-
- memcpy(match->flow.tunnel.metadata.opts.gnv,
- flower->key.tunnel.metadata.opts.gnv,
- flower->key.tunnel.metadata.present.len);
- match->flow.tunnel.metadata.present.len =
- flower->key.tunnel.metadata.present.len;
- match->flow.tunnel.flags |= FLOW_TNL_F_UDPIF;
- memcpy(match->wc.masks.tunnel.metadata.opts.gnv,
- flower->mask.tunnel.metadata.opts.gnv,
- flower->mask.tunnel.metadata.present.len);
-
- len = flower->key.tunnel.metadata.present.len;
- while (len) {
- opt = &match->flow.tunnel.metadata.opts.gnv[cnt];
- opt_mask = &match->wc.masks.tunnel.metadata.opts.gnv[cnt];
-
- opt_mask->length = 0x1f;
-
- cnt += sizeof(struct geneve_opt) / 4 + opt->length;
- len -= sizeof(struct geneve_opt) + opt->length * 4;
- }
-
- match->wc.masks.tunnel.metadata.present.len =
- flower->mask.tunnel.metadata.present.len;
- match->wc.masks.tunnel.flags |= FLOW_TNL_F_UDPIF;
-}
-
-static int
-parse_tc_flower_to_match(struct tc_flower *flower,
- struct match *match,
- struct nlattr **actions,
- struct dpif_flow_stats *stats,
- struct dpif_flow_attrs *attrs,
- struct ofpbuf *buf)
-{
- size_t act_off;
- struct tc_flower_key *key = &flower->key;
- struct tc_flower_key *mask = &flower->mask;
- odp_port_t outport = 0;
- struct tc_action *action;
- int i;
-
- ofpbuf_clear(buf);
-
- match_init_catchall(match);
- match_set_dl_src_masked(match, key->src_mac, mask->src_mac);
- match_set_dl_dst_masked(match, key->dst_mac, mask->dst_mac);
-
- if (eth_type_vlan(key->eth_type)) {
- match->flow.vlans[0].tpid = key->eth_type;
- match->wc.masks.vlans[0].tpid = OVS_BE16_MAX;
- match_set_dl_vlan(match, htons(key->vlan_id[0]), 0);
- match_set_dl_vlan_pcp(match, key->vlan_prio[0], 0);
-
- if (eth_type_vlan(key->encap_eth_type[0])) {
- match_set_dl_vlan(match, htons(key->vlan_id[1]), 1);
- match_set_dl_vlan_pcp(match, key->vlan_prio[1], 1);
- match_set_dl_type(match, key->encap_eth_type[1]);
- match->flow.vlans[1].tpid = key->encap_eth_type[0];
- match->wc.masks.vlans[1].tpid = OVS_BE16_MAX;
- } else {
- match_set_dl_type(match, key->encap_eth_type[0]);
- }
- flow_fix_vlan_tpid(&match->flow);
- } else if (eth_type_mpls(key->eth_type)) {
- match->flow.mpls_lse[0] = key->mpls_lse & mask->mpls_lse;
- match->wc.masks.mpls_lse[0] = mask->mpls_lse;
- match_set_dl_type(match, key->encap_eth_type[0]);
- } else {
- match_set_dl_type(match, key->eth_type);
- }
-
- if (is_ip_any(&match->flow)) {
- if (key->ip_proto) {
- match_set_nw_proto(match, key->ip_proto);
- }
-
- match_set_nw_tos_masked(match, key->ip_tos, mask->ip_tos);
- match_set_nw_ttl_masked(match, key->ip_ttl, mask->ip_ttl);
-
- if (mask->flags) {
- uint8_t flags = 0;
- uint8_t flags_mask = 0;
-
- if (mask->flags & TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT) {
- if (key->flags & TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT) {
- flags |= FLOW_NW_FRAG_ANY;
- }
- flags_mask |= FLOW_NW_FRAG_ANY;
- }
-
- if (mask->flags & TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST) {
- if (!(key->flags & TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)) {
- flags |= FLOW_NW_FRAG_LATER;
- }
- flags_mask |= FLOW_NW_FRAG_LATER;
- }
-
- match_set_nw_frag_masked(match, flags, flags_mask);
- }
-
- match_set_nw_src_masked(match, key->ipv4.ipv4_src, mask->ipv4.ipv4_src);
- match_set_nw_dst_masked(match, key->ipv4.ipv4_dst, mask->ipv4.ipv4_dst);
-
- match_set_ipv6_src_masked(match,
- &key->ipv6.ipv6_src, &mask->ipv6.ipv6_src);
- match_set_ipv6_dst_masked(match,
- &key->ipv6.ipv6_dst, &mask->ipv6.ipv6_dst);
-
- if (key->ip_proto == IPPROTO_TCP) {
- match_set_tp_dst_masked(match, key->tcp_dst, mask->tcp_dst);
- match_set_tp_src_masked(match, key->tcp_src, mask->tcp_src);
- match_set_tcp_flags_masked(match, key->tcp_flags, mask->tcp_flags);
- } else if (key->ip_proto == IPPROTO_UDP) {
- match_set_tp_dst_masked(match, key->udp_dst, mask->udp_dst);
- match_set_tp_src_masked(match, key->udp_src, mask->udp_src);
- } else if (key->ip_proto == IPPROTO_SCTP) {
- match_set_tp_dst_masked(match, key->sctp_dst, mask->sctp_dst);
- match_set_tp_src_masked(match, key->sctp_src, mask->sctp_src);
- }
- }
-
- if (flower->tunnel) {
- if (flower->mask.tunnel.id) {
- match_set_tun_id(match, flower->key.tunnel.id);
- }
- if (flower->key.tunnel.ipv4.ipv4_dst) {
- match_set_tun_src(match, flower->key.tunnel.ipv4.ipv4_src);
- match_set_tun_dst(match, flower->key.tunnel.ipv4.ipv4_dst);
- } else if (!is_all_zeros(&flower->key.tunnel.ipv6.ipv6_dst,
- sizeof flower->key.tunnel.ipv6.ipv6_dst)) {
- match_set_tun_ipv6_src(match, &flower->key.tunnel.ipv6.ipv6_src);
- match_set_tun_ipv6_dst(match, &flower->key.tunnel.ipv6.ipv6_dst);
- }
- if (flower->key.tunnel.tos) {
- match_set_tun_tos_masked(match, flower->key.tunnel.tos,
- flower->mask.tunnel.tos);
- }
- if (flower->key.tunnel.ttl) {
- match_set_tun_ttl_masked(match, flower->key.tunnel.ttl,
- flower->mask.tunnel.ttl);
- }
- if (flower->key.tunnel.tp_dst) {
- match_set_tun_tp_dst(match, flower->key.tunnel.tp_dst);
- }
- if (flower->key.tunnel.metadata.present.len) {
- flower_tun_opt_to_match(match, flower);
- }
- }
-
- act_off = nl_msg_start_nested(buf, OVS_FLOW_ATTR_ACTIONS);
- {
- action = flower->actions;
- for (i = 0; i < flower->action_count; i++, action++) {
- switch (action->type) {
- case TC_ACT_VLAN_POP: {
- nl_msg_put_flag(buf, OVS_ACTION_ATTR_POP_VLAN);
- }
- break;
- case TC_ACT_VLAN_PUSH: {
- struct ovs_action_push_vlan *push;
-
- push = nl_msg_put_unspec_zero(buf, OVS_ACTION_ATTR_PUSH_VLAN,
- sizeof *push);
- push->vlan_tpid = action->vlan.vlan_push_tpid;
- push->vlan_tci = htons(action->vlan.vlan_push_id
- | (action->vlan.vlan_push_prio << 13)
- | VLAN_CFI);
- }
- break;
- case TC_ACT_PEDIT: {
- parse_flower_rewrite_to_netlink_action(buf, flower);
- }
- break;
- case TC_ACT_ENCAP: {
- size_t set_offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SET);
- size_t tunnel_offset =
- nl_msg_start_nested(buf, OVS_KEY_ATTR_TUNNEL);
-
- if (action->encap.id_present) {
- nl_msg_put_be64(buf, OVS_TUNNEL_KEY_ATTR_ID, action->encap.id);
- }
- if (action->encap.ipv4.ipv4_src) {
- nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
- action->encap.ipv4.ipv4_src);
- }
- if (action->encap.ipv4.ipv4_dst) {
- nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
- action->encap.ipv4.ipv4_dst);
- }
- if (!is_all_zeros(&action->encap.ipv6.ipv6_src,
- sizeof action->encap.ipv6.ipv6_src)) {
- nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
- &action->encap.ipv6.ipv6_src);
- }
- if (!is_all_zeros(&action->encap.ipv6.ipv6_dst,
- sizeof action->encap.ipv6.ipv6_dst)) {
- nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
- &action->encap.ipv6.ipv6_dst);
- }
- if (action->encap.tos) {
- nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_TOS,
- action->encap.tos);
- }
- if (action->encap.ttl) {
- nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_TTL,
- action->encap.ttl);
- }
- if (action->encap.tp_dst) {
- nl_msg_put_be16(buf, OVS_TUNNEL_KEY_ATTR_TP_DST,
- action->encap.tp_dst);
- }
- if (!action->encap.no_csum) {
- nl_msg_put_u8(buf, OVS_TUNNEL_KEY_ATTR_CSUM,
- !action->encap.no_csum);
- }
-
- parse_tc_flower_geneve_opts(action, buf);
- nl_msg_end_nested(buf, tunnel_offset);
- nl_msg_end_nested(buf, set_offset);
- }
- break;
- case TC_ACT_OUTPUT: {
- if (action->out.ifindex_out) {
- outport =
- netdev_ifindex_to_odp_port(action->out.ifindex_out);
- if (!outport) {
- return ENOENT;
- }
- }
- nl_msg_put_u32(buf, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(outport));
- }
- break;
- }
- }
- }
- nl_msg_end_nested(buf, act_off);
-
- *actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));
-
- if (stats) {
- memset(stats, 0, sizeof *stats);
- stats->n_packets = get_32aligned_u64(&flower->stats.n_packets);
- stats->n_bytes = get_32aligned_u64(&flower->stats.n_bytes);
- stats->used = flower->lastused;
- }
-
- attrs->offloaded = (flower->offloaded_state == TC_OFFLOADED_STATE_IN_HW)
- || (flower->offloaded_state == TC_OFFLOADED_STATE_UNDEFINED);
- attrs->dp_layer = "tc";
-
- return 0;
-}
-
-static bool
-netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
- struct match *match,
- struct nlattr **actions,
- struct dpif_flow_stats *stats,
- struct dpif_flow_attrs *attrs,
- ovs_u128 *ufid,
- struct ofpbuf *rbuffer,
- struct ofpbuf *wbuffer)
-{
- struct ofpbuf nl_flow;
-
- while (nl_dump_next(dump->nl_dump, &nl_flow, rbuffer)) {
- struct tc_flower flower;
- struct netdev *netdev = dump->netdev;
-
- if (parse_netlink_to_tc_flower(&nl_flow, &flower)) {
- continue;
- }
-
- if (parse_tc_flower_to_match(&flower, match, actions, stats, attrs,
- wbuffer)) {
- continue;
- }
-
- if (flower.act_cookie.len) {
- *ufid = *((ovs_u128 *) flower.act_cookie.data);
- } else if (!find_ufid(flower.prio, flower.handle, netdev, ufid)) {
- continue;
- }
-
- match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
- match->flow.in_port.odp_port = dump->port;
-
- return true;
- }
-
- return false;
-}
-
-static int
-parse_put_flow_set_masked_action(struct tc_flower *flower,
- struct tc_action *action,
- const struct nlattr *set,
- size_t set_len,
- bool hasmask)
-{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
- uint64_t set_stub[1024 / 8];
- struct ofpbuf set_buf = OFPBUF_STUB_INITIALIZER(set_stub);
- char *set_data, *set_mask;
- char *key = (char *) &flower->rewrite.key;
- char *mask = (char *) &flower->rewrite.mask;
- const struct nlattr *attr;
- int i, j, type;
- size_t size;
-
- /* copy so we can set attr mask to 0 for used ovs key struct members */
- attr = ofpbuf_put(&set_buf, set, set_len);
-
- type = nl_attr_type(attr);
- size = nl_attr_get_size(attr) / 2;
- set_data = CONST_CAST(char *, nl_attr_get(attr));
- set_mask = set_data + size;
-
- if (type >= ARRAY_SIZE(set_flower_map)
- || !set_flower_map[type][0].size) {
- VLOG_DBG_RL(&rl, "unsupported set action type: %d", type);
- ofpbuf_uninit(&set_buf);
- return EOPNOTSUPP;
- }
-
- for (i = 0; i < ARRAY_SIZE(set_flower_map[type]); i++) {
- struct netlink_field *f = &set_flower_map[type][i];
-
- if (!f->size) {
- break;
- }
-
- /* copy masked value */
- for (j = 0; j < f->size; j++) {
- char maskval = hasmask ? set_mask[f->offset + j] : 0xFF;
-
- key[f->flower_offset + j] = maskval & set_data[f->offset + j];
- mask[f->flower_offset + j] = maskval;
- }
-
- /* set its mask to 0 to show it's been used. */
- if (hasmask) {
- memset(set_mask + f->offset, 0, f->size);
- }
- }
-
- if (!is_all_zeros(&flower->rewrite, sizeof flower->rewrite)) {
- if (flower->rewrite.rewrite == false) {
- flower->rewrite.rewrite = true;
- action->type = TC_ACT_PEDIT;
- flower->action_count++;
- }
- }
-
- if (hasmask && !is_all_zeros(set_mask, size)) {
- VLOG_DBG_RL(&rl, "unsupported sub attribute of set action type %d",
- type);
- ofpbuf_uninit(&set_buf);
- return EOPNOTSUPP;
- }
-
- ofpbuf_uninit(&set_buf);
- return 0;
-}
-
-static int
-parse_put_flow_set_action(struct tc_flower *flower, struct tc_action *action,
- const struct nlattr *set, size_t set_len)
-{
- const struct nlattr *tunnel;
- const struct nlattr *tun_attr;
- size_t tun_left, tunnel_len;
-
- if (nl_attr_type(set) != OVS_KEY_ATTR_TUNNEL) {
- return parse_put_flow_set_masked_action(flower, action, set,
- set_len, false);
- }
-
- tunnel = nl_attr_get(set);
- tunnel_len = nl_attr_get_size(set);
-
- action->type = TC_ACT_ENCAP;
- action->encap.id_present = false;
- flower->action_count++;
- NL_ATTR_FOR_EACH_UNSAFE(tun_attr, tun_left, tunnel, tunnel_len) {
- switch (nl_attr_type(tun_attr)) {
- case OVS_TUNNEL_KEY_ATTR_ID: {
- action->encap.id = nl_attr_get_be64(tun_attr);
- action->encap.id_present = true;
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: {
- action->encap.ipv4.ipv4_src = nl_attr_get_be32(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_IPV4_DST: {
- action->encap.ipv4.ipv4_dst = nl_attr_get_be32(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_TOS: {
- action->encap.tos = nl_attr_get_u8(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_TTL: {
- action->encap.ttl = nl_attr_get_u8(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
- action->encap.ipv6.ipv6_src =
- nl_attr_get_in6_addr(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
- action->encap.ipv6.ipv6_dst =
- nl_attr_get_in6_addr(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_TP_SRC: {
- action->encap.tp_src = nl_attr_get_be16(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_TP_DST: {
- action->encap.tp_dst = nl_attr_get_be16(tun_attr);
- }
- break;
- case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: {
- memcpy(action->encap.data.opts.gnv, nl_attr_get(tun_attr),
- nl_attr_get_size(tun_attr));
- action->encap.data.present.len = nl_attr_get_size(tun_attr);
- }
- break;
- }
- }
-
- return 0;
-}
-
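-/* Rejects matches that tc flower cannot offload.  Fields that were handled
- * while building the flower key must already have been cleared from the
- * mask; any attribute still set when the final is_all_zeros() check runs
- * yields EOPNOTSUPP. */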
-static int
-test_key_and_mask(struct match *match)
-{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
- const struct flow *key = &match->flow;
- struct flow *mask = &match->wc.masks;
-
- if (mask->pkt_mark) {
- VLOG_DBG_RL(&rl, "offloading attribute pkt_mark isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->recirc_id && key->recirc_id) {
- VLOG_DBG_RL(&rl, "offloading attribute recirc_id isn't supported");
- return EOPNOTSUPP;
- }
- mask->recirc_id = 0;
-
- if (mask->dp_hash) {
- VLOG_DBG_RL(&rl, "offloading attribute dp_hash isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->conj_id) {
- VLOG_DBG_RL(&rl, "offloading attribute conj_id isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->skb_priority) {
- VLOG_DBG_RL(&rl, "offloading attribute skb_priority isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->actset_output) {
- VLOG_DBG_RL(&rl,
- "offloading attribute actset_output isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->ct_state) {
- VLOG_DBG_RL(&rl, "offloading attribute ct_state isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->ct_zone) {
- VLOG_DBG_RL(&rl, "offloading attribute ct_zone isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->ct_mark) {
- VLOG_DBG_RL(&rl, "offloading attribute ct_mark isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->packet_type && key->packet_type) {
- VLOG_DBG_RL(&rl, "offloading attribute packet_type isn't supported");
- return EOPNOTSUPP;
- }
- mask->packet_type = 0;
-
- if (!ovs_u128_is_zero(mask->ct_label)) {
- VLOG_DBG_RL(&rl, "offloading attribute ct_label isn't supported");
- return EOPNOTSUPP;
- }
-
- for (int i = 0; i < FLOW_N_REGS; i++) {
- if (mask->regs[i]) {
- VLOG_DBG_RL(&rl,
- "offloading attribute regs[%d] isn't supported", i);
- return EOPNOTSUPP;
- }
- }
-
- if (mask->metadata) {
- VLOG_DBG_RL(&rl, "offloading attribute metadata isn't supported");
- return EOPNOTSUPP;
- }
-
- if (mask->nw_tos) {
- VLOG_DBG_RL(&rl, "offloading attribute nw_tos isn't supported");
- return EOPNOTSUPP;
- }
-
- for (int i = 1; i < FLOW_MAX_MPLS_LABELS; i++) {
- if (mask->mpls_lse[i]) {
- VLOG_DBG_RL(&rl, "offloading multiple mpls_lses isn't supported");
- return EOPNOTSUPP;
- }
- }
-
- if (key->dl_type == htons(ETH_TYPE_IP) &&
- key->nw_proto == IPPROTO_ICMP) {
- if (mask->tp_src) {
- VLOG_DBG_RL(&rl,
- "offloading attribute icmp_type isn't supported");
- return EOPNOTSUPP;
- }
- if (mask->tp_dst) {
- VLOG_DBG_RL(&rl,
- "offloading attribute icmp_code isn't supported");
- return EOPNOTSUPP;
- }
- } else if (key->dl_type == htons(ETH_TYPE_IP) &&
- key->nw_proto == IPPROTO_IGMP) {
- if (mask->tp_src) {
- VLOG_DBG_RL(&rl,
- "offloading attribute igmp_type isn't supported");
- return EOPNOTSUPP;
- }
- if (mask->tp_dst) {
- VLOG_DBG_RL(&rl,
- "offloading attribute igmp_code isn't supported");
- return EOPNOTSUPP;
- }
- } else if (key->dl_type == htons(ETH_TYPE_IPV6) &&
- key->nw_proto == IPPROTO_ICMPV6) {
- if (mask->tp_src) {
- VLOG_DBG_RL(&rl,
- "offloading attribute icmpv6_type isn't supported");
- return EOPNOTSUPP;
- }
- if (mask->tp_dst) {
- VLOG_DBG_RL(&rl,
- "offloading attribute icmpv6_code isn't supported");
- return EOPNOTSUPP;
- }
- } else if (key->dl_type == htons(OFP_DL_TYPE_NOT_ETH_TYPE)) {
- VLOG_DBG_RL(&rl,
- "offloading of non-ethernet packets isn't supported");
- return EOPNOTSUPP;
- }
-
- if (!is_all_zeros(mask, sizeof *mask)) {
- VLOG_DBG_RL(&rl, "offloading isn't supported, unknown attribute");
- return EOPNOTSUPP;
- }
-
- return 0;
-}
-
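-/* Copies Geneve options and their masks from the tunnel key into 'flower'.
- * Each option's length is mirrored into the mask so that the key and mask
- * option lists have the same layout. */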
-static void
-flower_match_to_tun_opt(struct tc_flower *flower, const struct flow_tnl *tnl,
- const struct flow_tnl *tnl_mask)
-{
- struct geneve_opt *opt, *opt_mask;
- int len, cnt = 0;
-
- memcpy(flower->key.tunnel.metadata.opts.gnv, tnl->metadata.opts.gnv,
- tnl->metadata.present.len);
- flower->key.tunnel.metadata.present.len = tnl->metadata.present.len;
-
- memcpy(flower->mask.tunnel.metadata.opts.gnv, tnl_mask->metadata.opts.gnv,
- tnl->metadata.present.len);
-
- len = flower->key.tunnel.metadata.present.len;
- while (len) {
- opt = &flower->key.tunnel.metadata.opts.gnv[cnt];
- opt_mask = &flower->mask.tunnel.metadata.opts.gnv[cnt];
-
- opt_mask->length = opt->length;
-
- cnt += sizeof(struct geneve_opt) / 4 + opt->length;
- len -= sizeof(struct geneve_opt) + opt->length * 4;
- }
-
- flower->mask.tunnel.metadata.present.len = tnl->metadata.present.len;
-}
-
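-/* Translates 'match' and 'actions' into a tc flower filter and installs it
- * on 'netdev'.  Mask bits are consumed as fields are translated and the
- * remainder is validated by test_key_and_mask().  Any existing filter for
- * 'ufid' is removed first, and 'ufid' is stored as the filter's act_cookie
- * so it can be recovered on dump. */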
-static int
-netdev_tc_flow_put(struct netdev *netdev, struct match *match,
- struct nlattr *actions, size_t actions_len,
- const ovs_u128 *ufid, struct offload_info *info,
- struct dpif_flow_stats *stats OVS_UNUSED)
-{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
- enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
- struct tc_flower flower;
- const struct flow *key = &match->flow;
- struct flow *mask = &match->wc.masks;
- const struct flow_tnl *tnl = &match->flow.tunnel;
- const struct flow_tnl *tnl_mask = &mask->tunnel;
- struct tc_action *action;
- uint32_t block_id = 0;
- struct nlattr *nla;
- size_t left;
- int prio = 0;
- int handle;
- int ifindex;
- int err;
-
- ifindex = netdev_get_ifindex(netdev);
- if (ifindex < 0) {
- VLOG_ERR_RL(&error_rl, "flow_put: failed to get ifindex for %s: %s",
- netdev_get_name(netdev), ovs_strerror(-ifindex));
- return -ifindex;
- }
-
- memset(&flower, 0, sizeof flower);
-
- if (flow_tnl_dst_is_set(&key->tunnel)) {
- VLOG_DBG_RL(&rl,
- "tunnel: id %#" PRIx64 " src " IP_FMT
- " dst " IP_FMT " tp_src %d tp_dst %d",
- ntohll(tnl->tun_id),
- IP_ARGS(tnl->ip_src), IP_ARGS(tnl->ip_dst),
- ntohs(tnl->tp_src), ntohs(tnl->tp_dst));
- flower.key.tunnel.id = tnl->tun_id;
- flower.key.tunnel.ipv4.ipv4_src = tnl->ip_src;
- flower.key.tunnel.ipv4.ipv4_dst = tnl->ip_dst;
- flower.key.tunnel.ipv6.ipv6_src = tnl->ipv6_src;
- flower.key.tunnel.ipv6.ipv6_dst = tnl->ipv6_dst;
- flower.key.tunnel.tos = tnl->ip_tos;
- flower.key.tunnel.ttl = tnl->ip_ttl;
- flower.key.tunnel.tp_src = tnl->tp_src;
- flower.key.tunnel.tp_dst = tnl->tp_dst;
- flower.mask.tunnel.tos = tnl_mask->ip_tos;
- flower.mask.tunnel.ttl = tnl_mask->ip_ttl;
- flower.mask.tunnel.id = (tnl->flags & FLOW_TNL_F_KEY) ? tnl_mask->tun_id : 0;
- flower_match_to_tun_opt(&flower, tnl, tnl_mask);
- flower.tunnel = true;
- }
- memset(&mask->tunnel, 0, sizeof mask->tunnel);
-
- flower.key.eth_type = key->dl_type;
- flower.mask.eth_type = mask->dl_type;
- if (mask->mpls_lse[0]) {
- flower.key.mpls_lse = key->mpls_lse[0];
- flower.mask.mpls_lse = mask->mpls_lse[0];
- flower.key.encap_eth_type[0] = flower.key.eth_type;
- }
- mask->mpls_lse[0] = 0;
-
- if (eth_type_vlan(key->vlans[0].tpid)) {
- flower.key.encap_eth_type[0] = flower.key.eth_type;
- flower.key.eth_type = key->vlans[0].tpid;
- }
- if (mask->vlans[0].tci) {
- ovs_be16 vid_mask = mask->vlans[0].tci & htons(VLAN_VID_MASK);
- ovs_be16 pcp_mask = mask->vlans[0].tci & htons(VLAN_PCP_MASK);
- ovs_be16 cfi = mask->vlans[0].tci & htons(VLAN_CFI);
-
- if (cfi && key->vlans[0].tci & htons(VLAN_CFI)
- && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
- && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
- && (vid_mask || pcp_mask)) {
- if (vid_mask) {
- flower.key.vlan_id[0] = vlan_tci_to_vid(key->vlans[0].tci);
- flower.mask.vlan_id[0] = vlan_tci_to_vid(mask->vlans[0].tci);
- VLOG_DBG_RL(&rl, "vlan_id[0]: %d\n", flower.key.vlan_id[0]);
- }
- if (pcp_mask) {
- flower.key.vlan_prio[0] = vlan_tci_to_pcp(key->vlans[0].tci);
- flower.mask.vlan_prio[0] = vlan_tci_to_pcp(mask->vlans[0].tci);
- VLOG_DBG_RL(&rl, "vlan_prio[0]: %d\n",
- flower.key.vlan_prio[0]);
- }
- } else if (mask->vlans[0].tci == htons(0xffff) &&
- ntohs(key->vlans[0].tci) == 0) {
- /* exact && no vlan */
- } else {
- /* partial mask */
- return EOPNOTSUPP;
- }
- }
-
- if (eth_type_vlan(key->vlans[1].tpid)) {
- flower.key.encap_eth_type[1] = flower.key.encap_eth_type[0];
- flower.key.encap_eth_type[0] = key->vlans[1].tpid;
- }
- if (mask->vlans[1].tci) {
- ovs_be16 vid_mask = mask->vlans[1].tci & htons(VLAN_VID_MASK);
- ovs_be16 pcp_mask = mask->vlans[1].tci & htons(VLAN_PCP_MASK);
- ovs_be16 cfi = mask->vlans[1].tci & htons(VLAN_CFI);
-
- if (cfi && key->vlans[1].tci & htons(VLAN_CFI)
- && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
- && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
- && (vid_mask || pcp_mask)) {
- if (vid_mask) {
- flower.key.vlan_id[1] = vlan_tci_to_vid(key->vlans[1].tci);
- flower.mask.vlan_id[1] = vlan_tci_to_vid(mask->vlans[1].tci);
- VLOG_DBG_RL(&rl, "vlan_id[1]: %d", flower.key.vlan_id[1]);
- }
- if (pcp_mask) {
- flower.key.vlan_prio[1] = vlan_tci_to_pcp(key->vlans[1].tci);
- flower.mask.vlan_prio[1] = vlan_tci_to_pcp(mask->vlans[1].tci);
- VLOG_DBG_RL(&rl, "vlan_prio[1]: %d", flower.key.vlan_prio[1]);
- }
- } else if (mask->vlans[1].tci == htons(0xffff) &&
- ntohs(key->vlans[1].tci) == 0) {
- /* exact && no vlan */
- } else {
- /* partial mask */
- return EOPNOTSUPP;
- }
- }
- memset(mask->vlans, 0, sizeof mask->vlans);
-
- flower.key.dst_mac = key->dl_dst;
- flower.mask.dst_mac = mask->dl_dst;
- flower.key.src_mac = key->dl_src;
- flower.mask.src_mac = mask->dl_src;
- memset(&mask->dl_dst, 0, sizeof mask->dl_dst);
- memset(&mask->dl_src, 0, sizeof mask->dl_src);
- mask->dl_type = 0;
- mask->in_port.odp_port = 0;
-
- if (is_ip_any(key)) {
- flower.key.ip_proto = key->nw_proto;
- flower.mask.ip_proto = mask->nw_proto;
- mask->nw_proto = 0;
- flower.key.ip_tos = key->nw_tos;
- flower.mask.ip_tos = mask->nw_tos;
- mask->nw_tos = 0;
- flower.key.ip_ttl = key->nw_ttl;
- flower.mask.ip_ttl = mask->nw_ttl;
- mask->nw_ttl = 0;
-
- if (mask->nw_frag & FLOW_NW_FRAG_ANY) {
- flower.mask.flags |= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT;
-
- if (key->nw_frag & FLOW_NW_FRAG_ANY) {
- flower.key.flags |= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT;
-
- if (mask->nw_frag & FLOW_NW_FRAG_LATER) {
- flower.mask.flags |= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST;
-
- if (!(key->nw_frag & FLOW_NW_FRAG_LATER)) {
- flower.key.flags |= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST;
- }
- }
- }
-
- mask->nw_frag = 0;
- }
-
- if (key->nw_proto == IPPROTO_TCP) {
- flower.key.tcp_dst = key->tp_dst;
- flower.mask.tcp_dst = mask->tp_dst;
- flower.key.tcp_src = key->tp_src;
- flower.mask.tcp_src = mask->tp_src;
- flower.key.tcp_flags = key->tcp_flags;
- flower.mask.tcp_flags = mask->tcp_flags;
- mask->tp_src = 0;
- mask->tp_dst = 0;
- mask->tcp_flags = 0;
- } else if (key->nw_proto == IPPROTO_UDP) {
- flower.key.udp_dst = key->tp_dst;
- flower.mask.udp_dst = mask->tp_dst;
- flower.key.udp_src = key->tp_src;
- flower.mask.udp_src = mask->tp_src;
- mask->tp_src = 0;
- mask->tp_dst = 0;
- } else if (key->nw_proto == IPPROTO_SCTP) {
- flower.key.sctp_dst = key->tp_dst;
- flower.mask.sctp_dst = mask->tp_dst;
- flower.key.sctp_src = key->tp_src;
- flower.mask.sctp_src = mask->tp_src;
- mask->tp_src = 0;
- mask->tp_dst = 0;
- }
-
- if (key->dl_type == htons(ETH_P_IP)) {
- flower.key.ipv4.ipv4_src = key->nw_src;
- flower.mask.ipv4.ipv4_src = mask->nw_src;
- flower.key.ipv4.ipv4_dst = key->nw_dst;
- flower.mask.ipv4.ipv4_dst = mask->nw_dst;
- mask->nw_src = 0;
- mask->nw_dst = 0;
- } else if (key->dl_type == htons(ETH_P_IPV6)) {
- flower.key.ipv6.ipv6_src = key->ipv6_src;
- flower.mask.ipv6.ipv6_src = mask->ipv6_src;
- flower.key.ipv6.ipv6_dst = key->ipv6_dst;
- flower.mask.ipv6.ipv6_dst = mask->ipv6_dst;
- memset(&mask->ipv6_src, 0, sizeof mask->ipv6_src);
- memset(&mask->ipv6_dst, 0, sizeof mask->ipv6_dst);
- }
- }
-
- err = test_key_and_mask(match);
- if (err) {
- return err;
- }
-
- NL_ATTR_FOR_EACH(nla, left, actions, actions_len) {
- if (flower.action_count >= TCA_ACT_MAX_PRIO) {
- VLOG_DBG_RL(&rl, "Can only support %d actions", flower.action_count);
- return EOPNOTSUPP;
- }
- action = &flower.actions[flower.action_count];
- if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
- odp_port_t port = nl_attr_get_odp_port(nla);
- struct netdev *outdev = netdev_ports_get(port, info->dpif_class);
-
- action->out.ifindex_out = netdev_get_ifindex(outdev);
- action->out.ingress = is_internal_port(netdev_get_type(outdev));
- action->type = TC_ACT_OUTPUT;
- flower.action_count++;
- netdev_close(outdev);
- } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_VLAN) {
- const struct ovs_action_push_vlan *vlan_push = nl_attr_get(nla);
-
- action->vlan.vlan_push_tpid = vlan_push->vlan_tpid;
- action->vlan.vlan_push_id = vlan_tci_to_vid(vlan_push->vlan_tci);
- action->vlan.vlan_push_prio = vlan_tci_to_pcp(vlan_push->vlan_tci);
- action->type = TC_ACT_VLAN_PUSH;
- flower.action_count++;
- } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) {
- action->type = TC_ACT_VLAN_POP;
- flower.action_count++;
- } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET) {
- const struct nlattr *set = nl_attr_get(nla);
- const size_t set_len = nl_attr_get_size(nla);
-
- err = parse_put_flow_set_action(&flower, action, set, set_len);
- if (err) {
- return err;
- }
- if (action->type == TC_ACT_ENCAP) {
- action->encap.tp_dst = info->tp_dst_port;
- action->encap.no_csum = !info->tunnel_csum_on;
- }
- } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET_MASKED) {
- const struct nlattr *set = nl_attr_get(nla);
- const size_t set_len = nl_attr_get_size(nla);
-
- err = parse_put_flow_set_masked_action(&flower, action, set,
- set_len, true);
- if (err) {
- return err;
- }
- } else {
- VLOG_DBG_RL(&rl, "unsupported put action type: %d",
- nl_attr_type(nla));
- return EOPNOTSUPP;
- }
- }
-
- block_id = get_block_id_from_netdev(netdev);
- handle = get_ufid_tc_mapping(ufid, &prio, NULL);
- if (handle && prio) {
- VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d", handle, prio);
- del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid,
- hook);
- }
-
- if (!prio) {
- prio = get_prio_for_tc_flower(&flower);
- if (prio == 0) {
- VLOG_ERR_RL(&rl, "couldn't get tc prio: %s", ovs_strerror(ENOSPC));
- return ENOSPC;
- }
- }
-
- flower.act_cookie.data = ufid;
- flower.act_cookie.len = sizeof *ufid;
-
- err = tc_replace_flower(ifindex, prio, handle, &flower, block_id, hook);
- if (!err) {
- add_ufid_tc_mapping(ufid, flower.prio, flower.handle, netdev, ifindex);
- }
-
- return err;
-}
-
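-/* Fetches the tc filter recorded for 'ufid' and translates it back into
- * 'match', 'actions', 'stats' and 'attrs'.  The netdev stored in the
- * ufid-to-tc mapping is used rather than the 'netdev' argument. */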
-static int
-netdev_tc_flow_get(struct netdev *netdev OVS_UNUSED,
- struct match *match,
- struct nlattr **actions,
- const ovs_u128 *ufid,
- struct dpif_flow_stats *stats,
- struct dpif_flow_attrs *attrs,
- struct ofpbuf *buf)
-{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
- struct netdev *dev;
- struct tc_flower flower;
- enum tc_qdisc_hook hook;
- uint32_t block_id = 0;
- odp_port_t in_port;
- int prio = 0;
- int ifindex;
- int handle;
- int err;
-
- handle = get_ufid_tc_mapping(ufid, &prio, &dev);
- if (!handle) {
- return ENOENT;
- }
-
- hook = get_tc_qdisc_hook(dev);
-
- ifindex = netdev_get_ifindex(dev);
- if (ifindex < 0) {
- VLOG_ERR_RL(&error_rl, "flow_get: failed to get ifindex for %s: %s",
- netdev_get_name(dev), ovs_strerror(-ifindex));
- netdev_close(dev);
- return -ifindex;
- }
-
- block_id = get_block_id_from_netdev(dev);
- VLOG_DBG_RL(&rl, "flow get (dev %s prio %d handle %d block_id %d)",
- netdev_get_name(dev), prio, handle, block_id);
- err = tc_get_flower(ifindex, prio, handle, &flower, block_id, hook);
- netdev_close(dev);
- if (err) {
- VLOG_ERR_RL(&error_rl, "flow get failed (dev %s prio %d handle %d): %s",
- netdev_get_name(dev), prio, handle, ovs_strerror(err));
- return err;
- }
-
- in_port = netdev_ifindex_to_odp_port(ifindex);
- parse_tc_flower_to_match(&flower, match, actions, stats, attrs, buf);
-
- match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
- match->flow.in_port.odp_port = in_port;
-
- return 0;
-}
-
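-/* Deletes the tc filter recorded for 'ufid', returning its last statistics
- * in 'stats' if requested. */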
-static int
-netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
- const ovs_u128 *ufid,
- struct dpif_flow_stats *stats)
-{
- struct tc_flower flower;
- enum tc_qdisc_hook hook;
- uint32_t block_id = 0;
- struct netdev *dev;
- int prio = 0;
- int ifindex;
- int handle;
- int error;
-
- handle = get_ufid_tc_mapping(ufid, &prio, &dev);
- if (!handle) {
- return ENOENT;
- }
-
- hook = get_tc_qdisc_hook(dev);
-
- ifindex = netdev_get_ifindex(dev);
- if (ifindex < 0) {
- VLOG_ERR_RL(&error_rl, "flow_del: failed to get ifindex for %s: %s",
- netdev_get_name(dev), ovs_strerror(-ifindex));
- netdev_close(dev);
- return -ifindex;
- }
-
- block_id = get_block_id_from_netdev(dev);
-
- if (stats) {
- memset(stats, 0, sizeof *stats);
- if (!tc_get_flower(ifindex, prio, handle, &flower, block_id, hook)) {
- stats->n_packets = get_32aligned_u64(&flower.stats.n_packets);
- stats->n_bytes = get_32aligned_u64(&flower.stats.n_bytes);
- stats->used = flower.lastused;
- }
- }
-
- error = del_filter_and_ufid_mapping(ifindex, prio, handle, block_id, ufid,
- hook);
-
- netdev_close(dev);
-
- return error;
-}
-
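-/* Probes whether the kernel accepts two flower filters with different masks
- * on the same priority and sets 'multi_mask_per_prio' accordingly. */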
-static void
-probe_multi_mask_per_prio(int ifindex)
-{
- struct tc_flower flower;
- int block_id = 0;
- int error;
-
- error = tc_add_del_qdisc(ifindex, true, block_id, TC_INGRESS);
- if (error) {
- return;
- }
-
- memset(&flower, 0, sizeof flower);
-
- flower.key.eth_type = htons(ETH_P_IP);
- flower.mask.eth_type = OVS_BE16_MAX;
- memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac);
- memset(&flower.mask.dst_mac, 0xff, sizeof flower.mask.dst_mac);
-
- error = tc_replace_flower(ifindex, 1, 1, &flower, block_id, TC_INGRESS);
- if (error) {
- goto out;
- }
-
- memset(&flower.key.src_mac, 0x11, sizeof flower.key.src_mac);
- memset(&flower.mask.src_mac, 0xff, sizeof flower.mask.src_mac);
-
- error = tc_replace_flower(ifindex, 1, 2, &flower, block_id, TC_INGRESS);
- tc_del_filter(ifindex, 1, 1, block_id, TC_INGRESS);
-
- if (error) {
- goto out;
- }
-
- tc_del_filter(ifindex, 1, 2, block_id, TC_INGRESS);
-
- multi_mask_per_prio = true;
- VLOG_INFO("probe tc: multiple masks on single tc prio is supported.");
-
-out:
- tc_add_del_qdisc(ifindex, false, block_id, TC_INGRESS);
-}
-
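-/* Probes whether the kernel supports shared tc blocks by creating an
- * ingress qdisc bound to block 1 and installing a trial filter on it; sets
- * 'block_support' accordingly. */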
-static void
-probe_tc_block_support(int ifindex)
-{
- struct tc_flower flower;
- uint32_t block_id = 1;
- int error;
-
- error = tc_add_del_qdisc(ifindex, true, block_id, TC_INGRESS);
- if (error) {
- return;
- }
-
- memset(&flower, 0, sizeof flower);
-
- flower.key.eth_type = htons(ETH_P_IP);
- flower.mask.eth_type = OVS_BE16_MAX;
- memset(&flower.key.dst_mac, 0x11, sizeof flower.key.dst_mac);
- memset(&flower.mask.dst_mac, 0xff, sizeof flower.mask.dst_mac);
-
- error = tc_replace_flower(ifindex, 1, 1, &flower, block_id, TC_INGRESS);
-
- tc_add_del_qdisc(ifindex, false, block_id, TC_INGRESS);
-
- if (!error) {
- block_support = true;
- VLOG_INFO("probe tc: block offload is supported.");
- }
-}
-
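-/* Prepares 'netdev' for flow offloading: runs the one-time feature probes
- * and adds the qdisc that offloaded filters will be attached to (EEXIST is
- * tolerated). */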
-static int
-netdev_tc_init_flow_api(struct netdev *netdev)
-{
- static struct ovsthread_once multi_mask_once = OVSTHREAD_ONCE_INITIALIZER;
- static struct ovsthread_once block_once = OVSTHREAD_ONCE_INITIALIZER;
- enum tc_qdisc_hook hook = get_tc_qdisc_hook(netdev);
- uint32_t block_id = 0;
- int ifindex;
- int error;
-
- ifindex = netdev_get_ifindex(netdev);
- if (ifindex < 0) {
- VLOG_INFO("init: failed to get ifindex for %s: %s",
- netdev_get_name(netdev), ovs_strerror(-ifindex));
- return -ifindex;
- }
-
- /* make sure there is no ingress qdisc */
- tc_add_del_qdisc(ifindex, false, 0, TC_INGRESS);
-
- if (ovsthread_once_start(&block_once)) {
- probe_tc_block_support(ifindex);
- ovsthread_once_done(&block_once);
- }
-
- if (ovsthread_once_start(&multi_mask_once)) {
- probe_multi_mask_per_prio(ifindex);
- ovsthread_once_done(&multi_mask_once);
- }
-
- block_id = get_block_id_from_netdev(netdev);
- error = tc_add_del_qdisc(ifindex, true, block_id, hook);
-
- if (error && error != EEXIST) {
- VLOG_INFO("failed adding ingress qdisc required for offloading: %s",
- ovs_strerror(error));
- return error;
- }
-
- VLOG_INFO("added ingress qdisc to %s", netdev_get_name(netdev));
-
- return 0;
-}
-
-const struct netdev_flow_api netdev_tc_offloads = {
- .type = "linux_tc",
- .flow_flush = netdev_tc_flow_flush,
- .flow_dump_create = netdev_tc_flow_dump_create,
- .flow_dump_destroy = netdev_tc_flow_dump_destroy,
- .flow_dump_next = netdev_tc_flow_dump_next,
- .flow_put = netdev_tc_flow_put,
- .flow_get = netdev_tc_flow_get,
- .flow_del = netdev_tc_flow_del,
- .init_flow_api = netdev_tc_init_flow_api,
-};
netdev_register_provider(&netdev_tap_class);
netdev_vport_tunnel_register();
- netdev_register_flow_api_provider(&netdev_tc_offloads);
+ netdev_register_flow_api_provider(&netdev_offload_tc);
#endif
#if defined(__FreeBSD__) || defined(__NetBSD__)
netdev_register_provider(&netdev_tap_class);