/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2019 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*/
#include <config.h>
+#include <sys/types.h>
+#include <netinet/in.h>
#include <arpa/inet.h>
#include "odp-util.h"
#include <errno.h>
#include <inttypes.h>
#include <math.h>
-#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <stdlib.h>
#include "uuid.h"
#include "openvswitch/vlog.h"
#include "openvswitch/match.h"
+#include "odp-netlink-macros.h"
+#include "csum.h"
VLOG_DEFINE_THIS_MODULE(odp_util);
static const char *delimiters = ", \t\r\n";
static const char *delimiters_end = ", \t\r\n)";
-struct attr_len_tbl {
- int len;
- const struct attr_len_tbl *next;
- int next_max;
+/* Maximum nesting depth accepted while parsing an ODP flow/action string. */
+#define MAX_ODP_NESTED 32
+
+/* Parser state threaded through the recursive ODP string parsing calls. */
+struct parse_odp_context {
+ const struct simap *port_names; /* Optional port-name -> number map. */
+ int depth; /* Current nested depth of odp string. */
};
-#define ATTR_LEN_INVALID -1
-#define ATTR_LEN_VARIABLE -2
-#define ATTR_LEN_NESTED -3
-static int parse_odp_key_mask_attr(const char *, const struct simap *port_names,
- struct ofpbuf *, struct ofpbuf *);
+static int parse_odp_key_mask_attr(struct parse_odp_context *, const char *,
+ struct ofpbuf *, struct ofpbuf *);
+
+static int parse_odp_key_mask_attr__(struct parse_odp_context *, const char *,
+ struct ofpbuf *, struct ofpbuf *);
+
static void format_odp_key_attr(const struct nlattr *a,
const struct nlattr *ma,
const struct hmap *portno_names, struct ds *ds,
const ovs_32aligned_u128 *mask, bool verbose);
static int scan_u128(const char *s, ovs_u128 *value, ovs_u128 *mask);
-static int parse_odp_action(const char *s, const struct simap *port_names,
+static int parse_odp_action(struct parse_odp_context *context, const char *s,
+ struct ofpbuf *actions);
+
+static int parse_odp_action__(struct parse_odp_context *context, const char *s,
struct ofpbuf *actions);
/* Returns one the following for the action with the given OVS_ACTION_ATTR_*
switch ((enum ovs_action_attr) type) {
case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
+ case OVS_ACTION_ATTR_LB_OUTPUT: return sizeof(uint32_t);
case OVS_ACTION_ATTR_TRUNC: return sizeof(struct ovs_action_trunc);
case OVS_ACTION_ATTR_TUNNEL_PUSH: return ATTR_LEN_VARIABLE;
case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
case OVS_ACTION_ATTR_SET_MASKED: return ATTR_LEN_VARIABLE;
case OVS_ACTION_ATTR_SAMPLE: return ATTR_LEN_VARIABLE;
case OVS_ACTION_ATTR_CT: return ATTR_LEN_VARIABLE;
+ case OVS_ACTION_ATTR_CT_CLEAR: return 0;
case OVS_ACTION_ATTR_PUSH_ETH: return sizeof(struct ovs_action_push_eth);
case OVS_ACTION_ATTR_POP_ETH: return 0;
case OVS_ACTION_ATTR_CLONE: return ATTR_LEN_VARIABLE;
+ case OVS_ACTION_ATTR_PUSH_NSH: return ATTR_LEN_VARIABLE;
+ case OVS_ACTION_ATTR_POP_NSH: return 0;
+ case OVS_ACTION_ATTR_CHECK_PKT_LEN: return ATTR_LEN_VARIABLE;
+ case OVS_ACTION_ATTR_DROP: return sizeof(uint32_t);
case OVS_ACTION_ATTR_UNSPEC:
case __OVS_ACTION_ATTR_MAX:
case OVS_KEY_ATTR_ICMPV6: return "icmpv6";
case OVS_KEY_ATTR_ARP: return "arp";
case OVS_KEY_ATTR_ND: return "nd";
+ case OVS_KEY_ATTR_ND_EXTENSIONS: return "nd_ext";
case OVS_KEY_ATTR_MPLS: return "mpls";
case OVS_KEY_ATTR_DP_HASH: return "dp_hash";
case OVS_KEY_ATTR_RECIRC_ID: return "recirc_id";
case OVS_KEY_ATTR_PACKET_TYPE: return "packet_type";
+ case OVS_KEY_ATTR_NSH: return "nsh";
case __OVS_KEY_ATTR_MAX:
default:
ds_put_format(ds, ")");
}
+/* Appends to 'ds' a human-readable rendering of NSH key 'key': the base
+ * header fields plus, for metadata type 1, the four context words. */
+static void
+format_nsh_key(struct ds *ds, const struct ovs_key_nsh *key)
+{
+ ds_put_format(ds, "flags=%d", key->flags);
+ ds_put_format(ds, ",ttl=%d", key->ttl);
+ ds_put_format(ds, ",mdtype=%d", key->mdtype);
+ ds_put_format(ds, ",np=%d", key->np);
+ /* 'path_hdr' packs SPI and SI together; print them as separate fields. */
+ ds_put_format(ds, ",spi=0x%x",
+ nsh_path_hdr_to_spi_uint32(key->path_hdr));
+ ds_put_format(ds, ",si=%d",
+ nsh_path_hdr_to_si(key->path_hdr));
+
+ switch (key->mdtype) {
+ case NSH_M_TYPE1:
+ for (int i = 0; i < 4; i++) {
+ ds_put_format(ds, ",c%d=0x%x", i + 1, ntohl(key->context[i]));
+ }
+ break;
+ case NSH_M_TYPE2:
+ default:
+ /* No support for matching other metadata formats yet. */
+ break;
+ }
+}
+
+/* Appends "name=value" (or "name=value/mask" when only partially masked) to
+ * 's' if any bit of 'mask' is set; prints nothing for an all-zero mask.
+ * '*first' tracks whether a leading ',' is needed and is cleared on output. */
+static void
+format_uint8_masked(struct ds *s, bool *first, const char *name,
+ uint8_t value, uint8_t mask)
+{
+ if (mask != 0) {
+ if (!*first) {
+ ds_put_char(s, ',');
+ }
+ ds_put_format(s, "%s=", name);
+ if (mask == UINT8_MAX) {
+ ds_put_format(s, "%"PRIu8, value);
+ } else {
+ ds_put_format(s, "0x%02"PRIx8"/0x%02"PRIx8, value, mask);
+ }
+ *first = false;
+ }
+}
+
+/* Like format_uint8_masked() but for a 32-bit network-byte-order field:
+ * appends "name=0xVALUE" (or "name=0xVALUE/0xMASK" if not exact-match) to
+ * 's' when 'mask' has any bit set.  Values are printed in host order. */
+static void
+format_be32_masked(struct ds *s, bool *first, const char *name,
+ ovs_be32 value, ovs_be32 mask)
+{
+ if (mask != htonl(0)) {
+ if (!*first) {
+ ds_put_char(s, ',');
+ }
+ ds_put_format(s, "%s=", name);
+ if (mask == OVS_BE32_MAX) {
+ ds_put_format(s, "0x%"PRIx32, ntohl(value));
+ } else {
+ ds_put_format(s, "0x%"PRIx32"/0x%08"PRIx32,
+ ntohl(value), ntohl(mask));
+ }
+ *first = false;
+ }
+}
+
+/* Formats NSH key 'key' into 'ds'.  With a null 'mask' the whole key is
+ * printed via format_nsh_key(); otherwise only the fields with nonzero
+ * mask bits are printed, each in "field=value[/mask]" form. */
+static void
+format_nsh_key_mask(struct ds *ds, const struct ovs_key_nsh *key,
+ const struct ovs_key_nsh *mask)
+{
+ if (!mask) {
+ format_nsh_key(ds, key);
+ } else {
+ bool first = true;
+ uint32_t spi = nsh_path_hdr_to_spi_uint32(key->path_hdr);
+ uint32_t spi_mask = nsh_path_hdr_to_spi_uint32(mask->path_hdr);
+ /* A fully-set 24-bit SPI mask is widened to UINT32_MAX so it is
+ * treated (and printed) as an exact match. */
+ if (spi_mask == (NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
+ spi_mask = UINT32_MAX;
+ }
+ uint8_t si = nsh_path_hdr_to_si(key->path_hdr);
+ uint8_t si_mask = nsh_path_hdr_to_si(mask->path_hdr);
+
+ format_uint8_masked(ds, &first, "flags", key->flags, mask->flags);
+ format_uint8_masked(ds, &first, "ttl", key->ttl, mask->ttl);
+ format_uint8_masked(ds, &first, "mdtype", key->mdtype, mask->mdtype);
+ format_uint8_masked(ds, &first, "np", key->np, mask->np);
+ format_be32_masked(ds, &first, "spi", htonl(spi), htonl(spi_mask));
+ format_uint8_masked(ds, &first, "si", si, si_mask);
+ format_be32_masked(ds, &first, "c1", key->context[0],
+ mask->context[0]);
+ format_be32_masked(ds, &first, "c2", key->context[1],
+ mask->context[1]);
+ format_be32_masked(ds, &first, "c3", key->context[2],
+ mask->context[2]);
+ format_be32_masked(ds, &first, "c4", key->context[3],
+ mask->context[3]);
+ }
+}
+
+/* Appends a "push_nsh(...)" action description for header 'nsh_hdr' to 'ds':
+ * base-header fields, then MD1 context words or MD2 metadata as hex. */
+static void
+format_odp_push_nsh_action(struct ds *ds,
+ const struct nsh_hdr *nsh_hdr)
+ {
+ /* Metadata length is whatever follows the fixed base header. */
+ size_t mdlen = nsh_hdr_len(nsh_hdr) - NSH_BASE_HDR_LEN;
+ uint32_t spi = ntohl(nsh_get_spi(nsh_hdr));
+ uint8_t si = nsh_get_si(nsh_hdr);
+ uint8_t flags = nsh_get_flags(nsh_hdr);
+ uint8_t ttl = nsh_get_ttl(nsh_hdr);
+
+ ds_put_cstr(ds, "push_nsh(");
+ ds_put_format(ds, "flags=%d", flags);
+ ds_put_format(ds, ",ttl=%d", ttl);
+ ds_put_format(ds, ",mdtype=%d", nsh_hdr->md_type);
+ ds_put_format(ds, ",np=%d", nsh_hdr->next_proto);
+ ds_put_format(ds, ",spi=0x%x", spi);
+ ds_put_format(ds, ",si=%d", si);
+ switch (nsh_hdr->md_type) {
+ case NSH_M_TYPE1: {
+ const struct nsh_md1_ctx *md1_ctx = &nsh_hdr->md1;
+ for (int i = 0; i < 4; i++) {
+ ds_put_format(ds, ",c%d=0x%x", i + 1,
+ ntohl(get_16aligned_be32(&md1_ctx->context[i])));
+ }
+ break;
+ }
+ case NSH_M_TYPE2: {
+ const struct nsh_md2_tlv *md2_ctx = &nsh_hdr->md2;
+ ds_put_cstr(ds, ",md2=");
+ ds_put_hex(ds, md2_ctx, mdlen);
+ break;
+ }
+ default:
+ ds_put_cstr(ds, ",<error: unknown mdtype>");
+ break;
+ }
+ ds_put_format(ds, ")");
+}
+
static const char *
slow_path_reason_to_string(uint32_t reason)
{
const uint8_t *userdata = nl_attr_get(userdata_attr);
size_t userdata_len = nl_attr_get_size(userdata_attr);
bool userdata_unspec = true;
- union user_action_cookie cookie;
+ struct user_action_cookie cookie;
- if (userdata_len >= sizeof cookie.type
- && userdata_len <= sizeof cookie) {
-
- memset(&cookie, 0, sizeof cookie);
- memcpy(&cookie, userdata, userdata_len);
+ if (userdata_len == sizeof cookie) {
+ memcpy(&cookie, userdata, sizeof cookie);
userdata_unspec = false;
- if (userdata_len == sizeof cookie.sflow
- && cookie.type == USER_ACTION_COOKIE_SFLOW) {
+ if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
ds_put_format(ds, ",sFlow("
"vid=%"PRIu16",pcp=%d,output=%"PRIu32")",
vlan_tci_to_vid(cookie.sflow.vlan_tci),
vlan_tci_to_pcp(cookie.sflow.vlan_tci),
cookie.sflow.output);
- } else if (userdata_len == sizeof cookie.slow_path
- && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
+ } else if (cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
ds_put_cstr(ds, ",slow_path(");
format_flags(ds, slow_path_reason_to_string,
cookie.slow_path.reason, ',');
ds_put_format(ds, ")");
- } else if (userdata_len == sizeof cookie.flow_sample
- && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
+ } else if (cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
ds_put_format(ds, ",flow_sample(probability=%"PRIu16
",collector_set_id=%"PRIu32
",obs_domain_id=%"PRIu32
ds_put_cstr(ds, ",egress");
}
ds_put_char(ds, ')');
- } else if (userdata_len >= sizeof cookie.ipfix
- && cookie.type == USER_ACTION_COOKIE_IPFIX) {
+ } else if (cookie.type == USER_ACTION_COOKIE_IPFIX) {
ds_put_format(ds, ",ipfix(output_port=");
odp_portno_name_format(portno_names,
cookie.ipfix.output_odp_port, ds);
ds_put_char(ds, ')');
+ } else if (cookie.type == USER_ACTION_COOKIE_CONTROLLER) {
+ ds_put_format(ds, ",controller(reason=%"PRIu16
+ ",dont_send=%d"
+ ",continuation=%d"
+ ",recirc_id=%"PRIu32
+ ",rule_cookie=%#"PRIx64
+ ",controller_id=%"PRIu16
+ ",max_len=%"PRIu16,
+ cookie.controller.reason,
+ !!cookie.controller.dont_send,
+ !!cookie.controller.continuation,
+ cookie.controller.recirc_id,
+ ntohll(get_32aligned_be64(
+ &cookie.controller.rule_cookie)),
+ cookie.controller.controller_id,
+ cookie.controller.max_len);
+ ds_put_char(ds, ')');
} else {
userdata_unspec = true;
}
ds_put_format(ds, "hash(");
if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
- ds_put_format(ds, "hash_l4(%"PRIu32")", hash_act->hash_basis);
+ ds_put_format(ds, "l4(%"PRIu32")", hash_act->hash_basis);
+ } else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
+ ds_put_format(ds, "sym_l4(%"PRIu32")", hash_act->hash_basis);
} else {
ds_put_format(ds, "Unknown hash algorithm(%"PRIu32")",
hash_act->hash_alg);
gnh->oam ? "oam," : "",
gnh->critical ? "crit," : "",
ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
-
+
if (gnh->opt_len) {
ds_put_cstr(ds, ",options(");
format_geneve_opts(gnh->options, NULL, gnh->opt_len * 4,
}
ds_put_char(ds, ')');
- } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
+ } else if (data->tnl_type == OVS_VPORT_TYPE_GRE ||
+ data->tnl_type == OVS_VPORT_TYPE_IP6GRE) {
const struct gre_base_hdr *greh;
ovs_16aligned_be32 *options;
options++;
}
ds_put_format(ds, ")");
+ } else if (data->tnl_type == OVS_VPORT_TYPE_ERSPAN ||
+ data->tnl_type == OVS_VPORT_TYPE_IP6ERSPAN) {
+ const struct gre_base_hdr *greh;
+ const struct erspan_base_hdr *ersh;
+
+ greh = (const struct gre_base_hdr *) l4;
+ ersh = ERSPAN_HDR(greh);
+
+ if (ersh->ver == 1) {
+ ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
+ ersh + 1);
+ ds_put_format(ds, "erspan(ver=1,sid=0x%"PRIx16",idx=0x%"PRIx32")",
+ get_sid(ersh), ntohl(get_16aligned_be32(index)));
+ } else if (ersh->ver == 2) {
+ struct erspan_md2 *md2 = ALIGNED_CAST(struct erspan_md2 *,
+ ersh + 1);
+ ds_put_format(ds, "erspan(ver=2,sid=0x%"PRIx16
+ ",dir=%"PRIu8",hwid=0x%"PRIx8")",
+ get_sid(ersh), md2->dir, get_hwid(md2));
+ } else {
+ VLOG_WARN("%s Invalid ERSPAN version %d\n", __func__, ersh->ver);
+ }
+ } else if (data->tnl_type == OVS_VPORT_TYPE_GTPU) {
+ const struct gtpuhdr *gtph;
+
+ gtph = format_udp_tnl_push_header(ds, udp);
+
+ ds_put_format(ds, "gtpu(flags=0x%"PRIx8
+ ",msgtype=%"PRIu8",teid=0x%"PRIx32")",
+ gtph->md.flags, gtph->md.msgtype,
+ ntohl(get_16aligned_be32(>ph->teid)));
}
+
ds_put_format(ds, ")");
}
[OVS_CT_ATTR_HELPER] = { .type = NL_A_STRING, .optional = true,
.min_len = 1, .max_len = 16 },
[OVS_CT_ATTR_NAT] = { .type = NL_A_UNSPEC, .optional = true },
+ [OVS_CT_ATTR_TIMEOUT] = { .type = NL_A_STRING, .optional = true,
+ .min_len = 1, .max_len = 32 },
};
static void
ovs_32aligned_u128 mask;
} *label;
const uint32_t *mark;
- const char *helper;
+ const char *helper, *timeout;
uint16_t zone;
bool commit, force;
const struct nlattr *nat;
mark = a[OVS_CT_ATTR_MARK] ? nl_attr_get(a[OVS_CT_ATTR_MARK]) : NULL;
label = a[OVS_CT_ATTR_LABELS] ? nl_attr_get(a[OVS_CT_ATTR_LABELS]): NULL;
helper = a[OVS_CT_ATTR_HELPER] ? nl_attr_get(a[OVS_CT_ATTR_HELPER]) : NULL;
+ timeout = a[OVS_CT_ATTR_TIMEOUT] ?
+ nl_attr_get(a[OVS_CT_ATTR_TIMEOUT]) : NULL;
nat = a[OVS_CT_ATTR_NAT];
ds_put_format(ds, "ct");
- if (commit || force || zone || mark || label || helper || nat) {
+ if (commit || force || zone || mark || label || helper || timeout || nat) {
ds_put_cstr(ds, "(");
if (commit) {
ds_put_format(ds, "commit,");
if (helper) {
ds_put_format(ds, "helper=%s,", helper);
}
+ if (timeout) {
+ ds_put_format(ds, "timeout=%s", timeout);
+ }
if (nat) {
format_odp_ct_nat(ds, nat);
}
}
}
+static const struct attr_len_tbl
+ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
+ [OVS_NSH_KEY_ATTR_BASE] = { .len = 8 },
+ [OVS_NSH_KEY_ATTR_MD1] = { .len = 16 },
+ [OVS_NSH_KEY_ATTR_MD2] = { .len = ATTR_LEN_VARIABLE },
+};
+
+/* Formats a masked "set(nsh(...))" from nested attribute 'attr'.  Each
+ * nested sub-attribute carries the key immediately followed by a mask of
+ * equal size (hence the "len == 2 * expected_len" check and the
+ * "key pointer + 1" mask addressing below).  On any malformed attribute
+ * the function returns having printed nothing. */
+static void
+format_odp_set_nsh(struct ds *ds, const struct nlattr *attr)
+{
+ unsigned int left;
+ const struct nlattr *a;
+ struct ovs_key_nsh nsh;
+ struct ovs_key_nsh nsh_mask;
+
+ /* Defaults: zero key with an all-ones (exact) mask. */
+ memset(&nsh, 0, sizeof nsh);
+ memset(&nsh_mask, 0xff, sizeof nsh_mask);
+
+ NL_NESTED_FOR_EACH (a, left, attr) {
+ enum ovs_nsh_key_attr type = nl_attr_type(a);
+ size_t len = nl_attr_get_size(a);
+
+ if (type >= OVS_NSH_KEY_ATTR_MAX) {
+ return;
+ }
+
+ int expected_len = ovs_nsh_key_attr_lens[type].len;
+ if ((expected_len != ATTR_LEN_VARIABLE) && (len != 2 * expected_len)) {
+ return;
+ }
+
+ switch (type) {
+ case OVS_NSH_KEY_ATTR_UNSPEC:
+ break;
+ case OVS_NSH_KEY_ATTR_BASE: {
+ const struct ovs_nsh_key_base *base = nl_attr_get(a);
+ const struct ovs_nsh_key_base *base_mask = base + 1;
+ memcpy(&nsh, base, sizeof(*base));
+ memcpy(&nsh_mask, base_mask, sizeof(*base_mask));
+ break;
+ }
+ case OVS_NSH_KEY_ATTR_MD1: {
+ const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
+ const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
+ memcpy(&nsh.context, &md1->context, sizeof(*md1));
+ memcpy(&nsh_mask.context, &md1_mask->context, sizeof(*md1_mask));
+ break;
+ }
+ case OVS_NSH_KEY_ATTR_MD2:
+ case __OVS_NSH_KEY_ATTR_MAX:
+ default:
+ /* No support for matching other metadata formats yet. */
+ break;
+ }
+ }
+
+ ds_put_cstr(ds, "set(nsh(");
+ format_nsh_key_mask(ds, &nsh, &nsh_mask);
+ ds_put_cstr(ds, "))");
+}
+
+/* Formats a "check_pkt_len(size=N,gt(...),le(...))" action from 'attr'.
+ * Prints "check_pkt_len(error)" if the nested attributes fail policy
+ * validation or either action list is missing.
+ * NOTE(review): 'portno_names' is marked OVS_UNUSED yet is passed to
+ * format_odp_actions() below, so the annotation is unnecessary. */
+static void
+format_odp_check_pkt_len_action(struct ds *ds, const struct nlattr *attr,
+ const struct hmap *portno_names OVS_UNUSED)
+{
+ static const struct nl_policy ovs_cpl_policy[] = {
+ [OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = { .type = NL_A_U16 },
+ [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = { .type = NL_A_NESTED },
+ [OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]
+ = { .type = NL_A_NESTED },
+ };
+ struct nlattr *a[ARRAY_SIZE(ovs_cpl_policy)];
+ ds_put_cstr(ds, "check_pkt_len");
+ if (!nl_parse_nested(attr, ovs_cpl_policy, a, ARRAY_SIZE(a))) {
+ ds_put_cstr(ds, "(error)");
+ return;
+ }
+
+ if (!a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] ||
+ !a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL]) {
+ ds_put_cstr(ds, "(error)");
+ return;
+ }
+
+ uint16_t pkt_len = nl_attr_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
+ ds_put_format(ds, "(size=%u,gt(", pkt_len);
+ const struct nlattr *acts;
+ acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
+ format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
+ portno_names);
+
+ ds_put_cstr(ds, "),le(");
+ acts = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
+ format_odp_actions(ds, nl_attr_get(acts), nl_attr_get_size(acts),
+ portno_names);
+ ds_put_cstr(ds, "))");
+}
+
static void
format_odp_action(struct ds *ds, const struct nlattr *a,
const struct hmap *portno_names)
case OVS_ACTION_ATTR_OUTPUT:
odp_portno_name_format(portno_names, nl_attr_get_odp_port(a), ds);
break;
+ case OVS_ACTION_ATTR_LB_OUTPUT:
+ ds_put_format(ds, "lb_output(%"PRIu32")", nl_attr_get_u32(a));
+ break;
case OVS_ACTION_ATTR_TRUNC: {
const struct ovs_action_trunc *trunc =
nl_attr_get_unspec(a, sizeof *trunc);
break;
case OVS_ACTION_ATTR_SET_MASKED:
a = nl_attr_get(a);
+ /* OVS_KEY_ATTR_NSH is nested attribute, so it needs special process */
+ if (nl_attr_type(a) == OVS_KEY_ATTR_NSH) {
+ format_odp_set_nsh(ds, a);
+ break;
+ }
size = nl_attr_get_size(a) / 2;
ds_put_cstr(ds, "set(");
case OVS_ACTION_ATTR_CT:
format_odp_conntrack_action(ds, a);
break;
+ case OVS_ACTION_ATTR_CT_CLEAR:
+ ds_put_cstr(ds, "ct_clear");
+ break;
case OVS_ACTION_ATTR_CLONE:
format_odp_clone_action(ds, a, portno_names);
break;
+ case OVS_ACTION_ATTR_PUSH_NSH: {
+ uint32_t buffer[NSH_HDR_MAX_LEN / 4];
+ struct nsh_hdr *nsh_hdr = ALIGNED_CAST(struct nsh_hdr *, buffer);
+ nsh_reset_ver_flags_ttl_len(nsh_hdr);
+ odp_nsh_hdr_from_attr(nl_attr_get(a), nsh_hdr, NSH_HDR_MAX_LEN);
+ format_odp_push_nsh_action(ds, nsh_hdr);
+ break;
+ }
+ case OVS_ACTION_ATTR_POP_NSH:
+ ds_put_cstr(ds, "pop_nsh()");
+ break;
+ case OVS_ACTION_ATTR_CHECK_PKT_LEN:
+ format_odp_check_pkt_len_action(ds, a, portno_names);
+ break;
+ case OVS_ACTION_ATTR_DROP:
+ ds_put_cstr(ds, "drop");
+ break;
case OVS_ACTION_ATTR_UNSPEC:
case __OVS_ACTION_ATTR_MAX:
default:
parse_odp_userspace_action(const char *s, struct ofpbuf *actions)
{
uint32_t pid;
- union user_action_cookie cookie;
+ struct user_action_cookie cookie;
struct ofpbuf buf;
odp_port_t tunnel_out_port;
int n = -1;
}
ofpbuf_init(&buf, 16);
+ memset(&cookie, 0, sizeof cookie);
+ user_data = &cookie;
+ user_data_size = sizeof cookie;
{
uint32_t output;
uint32_t probability;
uint32_t collector_set_id;
uint32_t obs_domain_id;
uint32_t obs_point_id;
+
+ /* USER_ACTION_COOKIE_CONTROLLER. */
+ uint8_t dont_send;
+ uint8_t continuation;
+ uint16_t reason;
+ uint32_t recirc_id;
+ uint64_t rule_cookie;
+ uint16_t controller_id;
+ uint16_t max_len;
+
int vid, pcp;
int n1 = -1;
if (ovs_scan(&s[n], ",sFlow(vid=%i,"
}
cookie.type = USER_ACTION_COOKIE_SFLOW;
+ cookie.ofp_in_port = OFPP_NONE;
+ cookie.ofproto_uuid = UUID_ZERO;
cookie.sflow.vlan_tci = htons(tci);
cookie.sflow.output = output;
- user_data = &cookie;
- user_data_size = sizeof cookie.sflow;
} else if (ovs_scan(&s[n], ",slow_path(%n",
&n1)) {
n += n1;
cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
- cookie.slow_path.unused = 0;
+ cookie.ofp_in_port = OFPP_NONE;
+ cookie.ofproto_uuid = UUID_ZERO;
cookie.slow_path.reason = 0;
res = parse_odp_flags(&s[n], slow_path_reason_to_string,
goto out;
}
n += res + 1;
-
- user_data = &cookie;
- user_data_size = sizeof cookie.slow_path;
} else if (ovs_scan(&s[n], ",flow_sample(probability=%"SCNi32","
"collector_set_id=%"SCNi32","
"obs_domain_id=%"SCNi32","
n += n1;
cookie.type = USER_ACTION_COOKIE_FLOW_SAMPLE;
+ cookie.ofp_in_port = OFPP_NONE;
+ cookie.ofproto_uuid = UUID_ZERO;
cookie.flow_sample.probability = probability;
cookie.flow_sample.collector_set_id = collector_set_id;
cookie.flow_sample.obs_domain_id = obs_domain_id;
cookie.flow_sample.obs_point_id = obs_point_id;
cookie.flow_sample.output_odp_port = u32_to_odp(output);
- user_data = &cookie;
- user_data_size = sizeof cookie.flow_sample;
if (ovs_scan(&s[n], ",ingress%n", &n1)) {
cookie.flow_sample.direction = NX_ACTION_SAMPLE_INGRESS;
&output, &n1) ) {
n += n1;
cookie.type = USER_ACTION_COOKIE_IPFIX;
+ cookie.ofp_in_port = OFPP_NONE;
+ cookie.ofproto_uuid = UUID_ZERO;
cookie.ipfix.output_odp_port = u32_to_odp(output);
- user_data = &cookie;
- user_data_size = sizeof cookie.ipfix;
- } else if (ovs_scan(&s[n], ",userdata(%n",
- &n1)) {
+ } else if (ovs_scan(&s[n], ",controller(reason=%"SCNu16
+ ",dont_send=%"SCNu8
+ ",continuation=%"SCNu8
+ ",recirc_id=%"SCNu32
+ ",rule_cookie=%"SCNx64
+ ",controller_id=%"SCNu16
+ ",max_len=%"SCNu16")%n",
+ &reason, &dont_send, &continuation, &recirc_id,
+ &rule_cookie, &controller_id, &max_len, &n1)) {
+ n += n1;
+ cookie.type = USER_ACTION_COOKIE_CONTROLLER;
+ cookie.ofp_in_port = OFPP_NONE;
+ cookie.ofproto_uuid = UUID_ZERO;
+ cookie.controller.dont_send = dont_send ? true : false;
+ cookie.controller.continuation = continuation ? true : false;
+ cookie.controller.reason = reason;
+ cookie.controller.recirc_id = recirc_id;
+ put_32aligned_be64(&cookie.controller.rule_cookie,
+ htonll(rule_cookie));
+ cookie.controller.controller_id = controller_id;
+ cookie.controller.max_len = max_len;
+ } else if (ovs_scan(&s[n], ",userdata(%n", &n1)) {
char *end;
n += n1;
int n1 = -1;
if (ovs_scan(&s[n], ",tunnel_out_port=%"SCNi32")%n",
&tunnel_out_port, &n1)) {
- odp_put_userspace_action(pid, user_data, user_data_size,
- tunnel_out_port, include_actions, actions);
- res = n + n1;
+ res = odp_put_userspace_action(pid, user_data, user_data_size,
+ tunnel_out_port, include_actions,
+ actions, NULL);
+ if (!res) {
+ res = n + n1;
+ }
goto out;
} else if (s[n] == ')') {
- odp_put_userspace_action(pid, user_data, user_data_size,
- ODPP_NONE, include_actions, actions);
- res = n + 1;
+ res = odp_put_userspace_action(pid, user_data, user_data_size,
+ ODPP_NONE, include_actions,
+ actions, NULL);
+ if (!res) {
+ res = n + 1;
+ }
goto out;
}
}
struct ovs_16aligned_ip6_hdr *ip6;
struct udp_header *udp;
struct gre_base_hdr *greh;
- uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, csum;
+ struct erspan_base_hdr *ersh;
+ struct erspan_md2 *md2;
+ uint16_t gre_proto, gre_flags, dl_type, udp_src, udp_dst, udp_csum, sid;
ovs_be32 sip, dip;
- uint32_t tnl_type = 0, header_len = 0, ip_len = 0;
+ uint32_t tnl_type = 0, header_len = 0, ip_len = 0, erspan_idx = 0;
void *l3, *l4;
int n = 0;
+ uint8_t hwid, dir;
+ uint32_t teid;
+ uint8_t gtpu_flags, gtpu_msgtype;
if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
return -EINVAL;
if (eth->eth_type == htons(ETH_TYPE_IP)) {
/* IPv4 */
uint16_t ip_frag_off;
+ memset(ip, 0, sizeof(*ip));
if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
IP_SCAN_ARGS(&sip),
put_16aligned_be32(&ip->ip_src, sip);
put_16aligned_be32(&ip->ip_dst, dip);
ip->ip_frag_off = htons(ip_frag_off);
+ ip->ip_ihl_ver = IP_IHL_VER(5, 4);
ip_len = sizeof *ip;
+ ip->ip_csum = csum(ip, ip_len);
} else {
char sip6_s[IPV6_SCAN_LEN + 1];
char dip6_s[IPV6_SCAN_LEN + 1];
udp = (struct udp_header *) l4;
greh = (struct gre_base_hdr *) l4;
if (ovs_scan_len(s, &n, "udp(src=%"SCNi16",dst=%"SCNi16",csum=0x%"SCNx16"),",
- &udp_src, &udp_dst, &csum)) {
+ &udp_src, &udp_dst, &udp_csum)) {
uint32_t vx_flags, vni;
udp->udp_src = htons(udp_src);
udp->udp_dst = htons(udp_dst);
udp->udp_len = 0;
- udp->udp_csum = htons(csum);
+ udp->udp_csum = htons(udp_csum);
if (ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
&vx_flags, &vni)) {
} else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
&gre_flags, &gre_proto)){
- tnl_type = OVS_VPORT_TYPE_GRE;
+ if (eth->eth_type == htons(ETH_TYPE_IP)) {
+ tnl_type = OVS_VPORT_TYPE_GRE;
+ } else {
+ tnl_type = OVS_VPORT_TYPE_IP6GRE;
+ }
greh->flags = htons(gre_flags);
greh->protocol = htons(gre_proto);
ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
if (greh->flags & htons(GRE_CSUM)) {
+ uint16_t csum;
if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx16, &csum)) {
return -EINVAL;
}
header_len = sizeof *eth + ip_len +
((uint8_t *) options - (uint8_t *) greh);
+ } else if (ovs_scan_len(s, &n, "erspan(ver=1,sid="SCNx16",idx=0x"SCNx32")",
+ &sid, &erspan_idx)) {
+ ersh = ERSPAN_HDR(greh);
+ ovs_16aligned_be32 *index = ALIGNED_CAST(ovs_16aligned_be32 *,
+ ersh + 1);
+
+ if (eth->eth_type == htons(ETH_TYPE_IP)) {
+ tnl_type = OVS_VPORT_TYPE_ERSPAN;
+ } else {
+ tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
+ }
+
+ greh->flags = htons(GRE_SEQ);
+ greh->protocol = htons(ETH_TYPE_ERSPAN1);
+
+ ersh->ver = 1;
+ set_sid(ersh, sid);
+ put_16aligned_be32(index, htonl(erspan_idx));
+
+ if (!ovs_scan_len(s, &n, ")")) {
+ return -EINVAL;
+ }
+ header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
+ sizeof *ersh + ERSPAN_V1_MDSIZE;
+
+ } else if (ovs_scan_len(s, &n, "erspan(ver=2,sid="SCNx16"dir="SCNu8
+ ",hwid=0x"SCNx8")", &sid, &dir, &hwid)) {
+
+ ersh = ERSPAN_HDR(greh);
+ md2 = ALIGNED_CAST(struct erspan_md2 *, ersh + 1);
+
+ if (eth->eth_type == htons(ETH_TYPE_IP)) {
+ tnl_type = OVS_VPORT_TYPE_ERSPAN;
+ } else {
+ tnl_type = OVS_VPORT_TYPE_IP6ERSPAN;
+ }
+
+ greh->flags = htons(GRE_SEQ);
+ greh->protocol = htons(ETH_TYPE_ERSPAN2);
+
+ ersh->ver = 2;
+ set_sid(ersh, sid);
+ set_hwid(md2, hwid);
+ md2->dir = dir;
+
+ if (!ovs_scan_len(s, &n, ")")) {
+ return -EINVAL;
+ }
+
+ header_len = sizeof *eth + ip_len + ERSPAN_GREHDR_LEN +
+ sizeof *ersh + ERSPAN_V2_MDSIZE;
+
+ } else if (ovs_scan_len(s, &n, "gtpu(flags=%"SCNi8",msgtype=%"
+ SCNu8",teid=0x%"SCNx32"))",
+ >pu_flags, >pu_msgtype, &teid)) {
+ struct gtpuhdr *gtph = (struct gtpuhdr *) (udp + 1);
+
+ gtph->md.flags = gtpu_flags;
+ gtph->md.msgtype = gtpu_msgtype;
+ put_16aligned_be32(>ph->teid, htonl(teid));
+ tnl_type = OVS_VPORT_TYPE_GTPU;
+ header_len = sizeof *eth + ip_len +
+ sizeof *udp + sizeof *gtph;
} else {
return -EINVAL;
}
const char *s = s_;
if (ovs_scan(s, "ct")) {
- const char *helper = NULL;
- size_t helper_len = 0;
+ const char *helper = NULL, *timeout = NULL;
+ size_t helper_len = 0, timeout_len = 0;
bool commit = false;
bool force_commit = false;
uint16_t zone = 0;
s += n;
retval = scan_u128(s, &ct_label.value, &ct_label.mask);
- if (retval < 0) {
- return retval;
+ if (retval == 0) {
+ return -EINVAL;
}
s += retval;
continue;
s += helper_len;
continue;
}
+ if (ovs_scan(s, "timeout=%n", &n)) {
+ s += n;
+ timeout_len = strcspn(s, delimiters_end);
+ if (!timeout_len || timeout_len > 31) {
+ return -EINVAL;
+ }
+ timeout = s;
+ s += timeout_len;
+ continue;
+ }
n = scan_ct_nat(s, &nat_params);
if (n > 0) {
nl_msg_put_string__(actions, OVS_CT_ATTR_HELPER, helper,
helper_len);
}
+ if (timeout) {
+ nl_msg_put_string__(actions, OVS_CT_ATTR_TIMEOUT, timeout,
+ timeout_len);
+ }
if (have_nat) {
nl_msg_put_ct_nat(&nat_params, actions);
}
return s - s_;
}
+/* Serializes 'nsh' (plus optional MD2 'metadata' of 'md_size' bytes) into
+ * 'buf' as a nested OVS_KEY_ATTR_NSH attribute.  For a mask ('is_mask'
+ * true) the MD1 context words are emitted unconditionally, regardless of
+ * 'nsh->mdtype'; for a key they are emitted only for the matching mdtype. */
+static void
+nsh_key_to_attr(struct ofpbuf *buf, const struct ovs_key_nsh *nsh,
+ uint8_t * metadata, size_t md_size,
+ bool is_mask)
+{
+ size_t nsh_key_ofs;
+ struct ovs_nsh_key_base base;
+
+ base.flags = nsh->flags;
+ base.ttl = nsh->ttl;
+ base.mdtype = nsh->mdtype;
+ base.np = nsh->np;
+ base.path_hdr = nsh->path_hdr;
+
+ nsh_key_ofs = nl_msg_start_nested(buf, OVS_KEY_ATTR_NSH);
+ nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_BASE, &base, sizeof base);
+
+ if (is_mask) {
+ nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
+ sizeof nsh->context);
+ } else {
+ switch (nsh->mdtype) {
+ case NSH_M_TYPE1:
+ nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD1, nsh->context,
+ sizeof nsh->context);
+ break;
+ case NSH_M_TYPE2:
+ if (metadata && md_size > 0) {
+ nl_msg_put_unspec(buf, OVS_NSH_KEY_ATTR_MD2, metadata,
+ md_size);
+ }
+ break;
+ default:
+ /* No match support for other MD formats yet. */
+ break;
+ }
+ }
+ nl_msg_end_nested(buf, nsh_key_ofs);
+}
+
+
+/* Parses a "push_nsh(...)" action from 's' and appends the corresponding
+ * nested OVS_ACTION_ATTR_PUSH_NSH attribute to 'actions'.  Returns the
+ * number of characters of 's' consumed (up to the closing ')') on success,
+ * or -EINVAL on a parse error. */
+static int
+parse_odp_push_nsh_action(const char *s, struct ofpbuf *actions)
+{
+ int n = 0;
+ int ret = 0;
+ uint32_t spi = 0;
+ uint8_t si = 255;
+ uint32_t cd;
+ struct ovs_key_nsh nsh;
+ uint8_t metadata[NSH_CTX_HDRS_MAX_LEN];
+ uint8_t md_size = 0;
+
+ if (!ovs_scan_len(s, &n, "push_nsh(")) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* The default is NSH_M_TYPE1 */
+ nsh.flags = 0;
+ nsh.ttl = 63;
+ nsh.mdtype = NSH_M_TYPE1;
+ nsh.np = NSH_P_ETHERNET;
+ nsh.path_hdr = nsh_spi_si_to_path_hdr(0, 255);
+ memset(nsh.context, 0, NSH_M_TYPE1_MDLEN);
+
+ /* Fields may appear in any order, separated by 'delimiters'; context
+ * fields are accepted only for the currently selected mdtype. */
+ for (;;) {
+ n += strspn(s + n, delimiters);
+ if (s[n] == ')') {
+ break;
+ }
+
+ if (ovs_scan_len(s, &n, "flags=%"SCNi8, &nsh.flags)) {
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "ttl=%"SCNi8, &nsh.ttl)) {
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "mdtype=%"SCNi8, &nsh.mdtype)) {
+ switch (nsh.mdtype) {
+ case NSH_M_TYPE1:
+ /* This is the default format. */;
+ break;
+ case NSH_M_TYPE2:
+ /* Length will be updated later. */
+ md_size = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "np=%"SCNi8, &nsh.np)) {
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "spi=0x%"SCNx32, &spi)) {
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "si=%"SCNi8, &si)) {
+ continue;
+ }
+ if (nsh.mdtype == NSH_M_TYPE1) {
+ if (ovs_scan_len(s, &n, "c1=0x%"SCNx32, &cd)) {
+ nsh.context[0] = htonl(cd);
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "c2=0x%"SCNx32, &cd)) {
+ nsh.context[1] = htonl(cd);
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "c3=0x%"SCNx32, &cd)) {
+ nsh.context[2] = htonl(cd);
+ continue;
+ }
+ if (ovs_scan_len(s, &n, "c4=0x%"SCNx32, &cd)) {
+ nsh.context[3] = htonl(cd);
+ continue;
+ }
+ }
+ else if (nsh.mdtype == NSH_M_TYPE2) {
+ struct ofpbuf b;
+ char buf[512];
+ size_t mdlen, padding;
+ /* 'n/2' over-estimates the decoded length, so the bound check
+ * is conservative but safe. */
+ if (ovs_scan_len(s, &n, "md2=0x%511[0-9a-fA-F]", buf)
+ && n/2 <= sizeof metadata) {
+ ofpbuf_use_stub(&b, metadata, sizeof metadata);
+ ofpbuf_put_hex(&b, buf, &mdlen);
+ /* Pad metadata to 4 bytes. */
+ padding = PAD_SIZE(mdlen, 4);
+ if (padding > 0) {
+ ofpbuf_put_zeros(&b, padding);
+ }
+ md_size = mdlen + padding;
+ ofpbuf_uninit(&b);
+ continue;
+ }
+ }
+
+ /* Unrecognized field. */
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ if (ret >= 0) {
+ /* Success: combine SPI/SI into path_hdr and emit the attribute. */
+ nsh.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
+ size_t offset = nl_msg_start_nested(actions, OVS_ACTION_ATTR_PUSH_NSH);
+ nsh_key_to_attr(actions, &nsh, metadata, md_size, false);
+ nl_msg_end_nested(actions, offset);
+ ret = n;
+ }
+ return ret;
+}
+
static int
-parse_action_list(const char *s, const struct simap *port_names,
+parse_action_list(struct parse_odp_context *context, const char *s,
struct ofpbuf *actions)
{
int n = 0;
if (s[n] == ')') {
break;
}
- retval = parse_odp_action(s + n, port_names, actions);
+ retval = parse_odp_action(context, s + n, actions);
if (retval < 0) {
return retval;
}
n += retval;
}
+ if (actions->size > UINT16_MAX) {
+ return -EFBIG;
+ }
+
return n;
}
+
static int
-parse_odp_action(const char *s, const struct simap *port_names,
+parse_odp_action(struct parse_odp_context *context, const char *s,
struct ofpbuf *actions)
+{
+ int retval;
+
+ /* Track recursion depth so nested action lists (sample, clone, ...)
+ * cannot recurse without bound. */
+ context->depth++;
+
+ if (context->depth == MAX_ODP_NESTED) {
+ retval = -EINVAL;
+ } else {
+ retval = parse_odp_action__(context, s, actions);
+ }
+
+ context->depth--;
+
+ return retval;
+}
+
+
+static int
+parse_odp_action__(struct parse_odp_context *context, const char *s,
+ struct ofpbuf *actions)
{
{
uint32_t port;
}
}
+ {
+ uint32_t bond_id;
+ int n;
+
+ if (ovs_scan(s, "lb_output(%"PRIu32")%n", &bond_id, &n)) {
+ nl_msg_put_u32(actions, OVS_ACTION_ATTR_LB_OUTPUT, bond_id);
+ return n;
+ }
+ }
+
{
uint32_t max_len;
int n;
}
}
- if (port_names) {
+ if (context->port_names) {
int len = strcspn(s, delimiters);
struct simap_node *node;
- node = simap_find_len(port_names, s, len);
+ node = simap_find_len(context->port_names, s, len);
if (node) {
nl_msg_put_u32(actions, OVS_ACTION_ATTR_OUTPUT, node->data);
return len;
    if (!strncmp(s, "set(", 4)) {
        size_t start_ofs;
        int retval;
-        struct nlattr mask[128 / sizeof(struct nlattr)];
-        struct ofpbuf maskbuf;
+        struct nlattr mask[1024 / sizeof(struct nlattr)];
+        struct ofpbuf maskbuf = OFPBUF_STUB_INITIALIZER(mask);
        struct nlattr *nested, *key;
        size_t size;
-        /* 'mask' is big enough to hold any key. */
-        ofpbuf_use_stack(&maskbuf, mask, sizeof mask);
-
        start_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_SET);
-        retval = parse_odp_key_mask_attr(s + 4, port_names, actions, &maskbuf);
+        retval = parse_odp_key_mask_attr(context, s + 4, actions, &maskbuf);
        if (retval < 0) {
+            ofpbuf_uninit(&maskbuf);
            return retval;
        }
        if (s[retval + 4] != ')') {
+            ofpbuf_uninit(&maskbuf);
            return -EINVAL;
        }
        if (size == nl_attr_get_size(key)) {
            /* Change to masked set action if not fully masked. */
            if (!is_all_ones(mask + 1, size)) {
+                /* Remove padding of the earlier key payload. */
+                actions->size -= NLA_ALIGN(key->nla_len) - key->nla_len;
+
+                /* Put the mask payload right after the key payload. */
                key->nla_len += size;
                ofpbuf_put(actions, mask + 1, size);
+
                /* 'actions' may have been reallocated by ofpbuf_put(). */
                nested = ofpbuf_at_assert(actions, start_ofs, sizeof *nested);
                nested->nla_type = OVS_ACTION_ATTR_SET_MASKED;
+
                key = nested + 1;
+                /* Add new padding as needed. */
                ofpbuf_put_zeros(actions, NLA_ALIGN(key->nla_len) -
                                          key->nla_len);
            }
        }
+        ofpbuf_uninit(&maskbuf);
        nl_msg_end_nested(actions, start_ofs);
        return retval + 5;
&tpid, &vid, &pcp, &n)
|| ovs_scan(s, "push_vlan(tpid=%i,vid=%i,pcp=%i,cfi=%i)%n",
&tpid, &vid, &pcp, &cfi, &n)) {
+ if ((vid & ~(VLAN_VID_MASK >> VLAN_VID_SHIFT)) != 0
+ || (pcp & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT)) != 0) {
+ return -EINVAL;
+ }
push.vlan_tpid = htons(tpid);
push.vlan_tci = htons((vid << VLAN_VID_SHIFT)
| (pcp << VLAN_PCP_SHIFT)
actions_ofs = nl_msg_start_nested(actions,
OVS_SAMPLE_ATTR_ACTIONS);
- int retval = parse_action_list(s + n, port_names, actions);
- if (retval < 0)
+ int retval = parse_action_list(context, s + n, actions);
+ if (retval < 0) {
return retval;
+ }
+
n += retval;
nl_msg_end_nested(actions, actions_ofs);
int n = 6;
actions_ofs = nl_msg_start_nested(actions, OVS_ACTION_ATTR_CLONE);
- int retval = parse_action_list(s + n, port_names, actions);
+ int retval = parse_action_list(context, s + n, actions);
if (retval < 0) {
return retval;
}
}
{
- uint32_t port;
- int n;
-
- if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
- nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
- return n;
+ if (!strncmp(s, "push_nsh(", 9)) {
+ int retval = parse_odp_push_nsh_action(s, actions);
+ if (retval < 0) {
+ return retval;
+ }
+ return retval + 1;
+ }
+ }
+
+ {
+ int n;
+ if (ovs_scan(s, "pop_nsh()%n", &n)) {
+ nl_msg_put_flag(actions, OVS_ACTION_ATTR_POP_NSH);
+ return n;
+ }
+ }
+
+ {
+ uint32_t port;
+ int n;
+
+ if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
+ nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
+ return n;
+ }
+ }
+
+ {
+ if (!strncmp(s, "ct_clear", 8)) {
+ nl_msg_put_flag(actions, OVS_ACTION_ATTR_CT_CLEAR);
+ return 8;
+ }
+ }
+
+ {
+ uint16_t pkt_len;
+ int n = -1;
+ if (ovs_scan(s, "check_pkt_len(size=%"SCNi16",gt(%n", &pkt_len, &n)) {
+ size_t cpl_ofs, actions_ofs;
+ cpl_ofs = nl_msg_start_nested(actions,
+ OVS_ACTION_ATTR_CHECK_PKT_LEN);
+ nl_msg_put_u16(actions, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, pkt_len);
+ actions_ofs = nl_msg_start_nested(
+ actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
+
+ int retval;
+ if (!strncasecmp(s + n, "drop", 4)) {
+ n += 4;
+ } else {
+ retval = parse_action_list(context, s + n, actions);
+ if (retval < 0) {
+ return retval;
+ }
+
+ n += retval;
+ }
+ nl_msg_end_nested(actions, actions_ofs);
+ retval = -1;
+ if (!ovs_scan(s + n, "),le(%n", &retval)) {
+ return -EINVAL;
+ }
+ n += retval;
+
+ actions_ofs = nl_msg_start_nested(
+ actions, OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
+ if (!strncasecmp(s + n, "drop", 4)) {
+ n += 4;
+ } else {
+ retval = parse_action_list(context, s + n, actions);
+ if (retval < 0) {
+ return retval;
+ }
+ n += retval;
+ }
+ nl_msg_end_nested(actions, actions_ofs);
+ nl_msg_end_nested(actions, cpl_ofs);
+ return s[n + 1] == ')' ? n + 2 : -EINVAL;
}
}
return n;
}
}
+
return -EINVAL;
}
size_t old_size;
if (!strcasecmp(s, "drop")) {
+ nl_msg_put_u32(actions, OVS_ACTION_ATTR_DROP, XLATE_OK);
return 0;
}
+ struct parse_odp_context context = (struct parse_odp_context) {
+ .port_names = port_names,
+ };
+
old_size = actions->size;
for (;;) {
int retval;
return 0;
}
- retval = parse_odp_action(s, port_names, actions);
+ retval = parse_odp_action(&context, s, actions);
+
if (retval < 0 || !strchr(delimiters, s[retval])) {
actions->size = old_size;
return -retval;
.next_max = OVS_VXLAN_EXT_MAX},
[OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = 16 },
[OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = 16 },
+ [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = ATTR_LEN_VARIABLE },
+ [OVS_TUNNEL_KEY_ATTR_GTPU_OPTS] = { .len = ATTR_LEN_VARIABLE },
};
-static const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
+const struct attr_len_tbl ovs_flow_key_attr_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_ENCAP] = { .len = ATTR_LEN_NESTED },
[OVS_KEY_ATTR_PRIORITY] = { .len = 4 },
[OVS_KEY_ATTR_SKB_MARK] = { .len = 4 },
[OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
[OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
[OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
+ [OVS_KEY_ATTR_ND_EXTENSIONS] = { .len = sizeof(struct ovs_key_nd_extensions) },
[OVS_KEY_ATTR_CT_STATE] = { .len = 4 },
[OVS_KEY_ATTR_CT_ZONE] = { .len = 2 },
[OVS_KEY_ATTR_CT_MARK] = { .len = 4 },
[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = { .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = { .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
[OVS_KEY_ATTR_PACKET_TYPE] = { .len = 4 },
+ [OVS_KEY_ATTR_NSH] = { .len = ATTR_LEN_NESTED,
+ .next = ovs_nsh_key_attr_lens,
+ .next_max = OVS_NSH_KEY_ATTR_MAX },
};
/* Returns the correct length of the payload for a flow key attribute of the
* if the attribute's payload is variable length, or ATTR_LEN_NESTED if the
* payload is a nested type. */
static int
-odp_key_attr_len(const struct attr_len_tbl tbl[], int max_len, uint16_t type)
+odp_key_attr_len(const struct attr_len_tbl tbl[], int max_type, uint16_t type)
{
- if (type > max_len) {
+ if (type > max_type) {
return ATTR_LEN_INVALID;
}
}
}
+enum odp_key_fitness
+odp_nsh_hdr_from_attr(const struct nlattr *attr,
+                      struct nsh_hdr *nsh_hdr, size_t size)
+{
+    unsigned int left;
+    const struct nlattr *a;
+    bool unknown = false;
+    uint8_t flags = 0;
+    uint8_t ttl = 63;
+    size_t mdlen = 0;
+    bool has_md1 = false;
+    bool has_md2 = false;
+
+    memset(nsh_hdr, 0, size);
+
+    NL_NESTED_FOR_EACH (a, left, attr) {
+        uint16_t type = nl_attr_type(a);
+        size_t len = nl_attr_get_size(a);
+        int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
+                                            OVS_NSH_KEY_ATTR_MAX, type);
+
+        if (len != expected_len && expected_len >= 0) {
+            return ODP_FIT_ERROR;
+        }
+
+        switch (type) {
+        case OVS_NSH_KEY_ATTR_BASE: {
+            const struct ovs_nsh_key_base *base = nl_attr_get(a);
+            nsh_hdr->next_proto = base->np;
+            nsh_hdr->md_type = base->mdtype;
+            put_16aligned_be32(&nsh_hdr->path_hdr, base->path_hdr);
+            flags = base->flags;
+            ttl = base->ttl;
+            break;
+        }
+        case OVS_NSH_KEY_ATTR_MD1: {
+            const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
+            struct nsh_md1_ctx *md1_dst = &nsh_hdr->md1;
+            has_md1 = true;
+            mdlen = nl_attr_get_size(a);
+            if ((mdlen + NSH_BASE_HDR_LEN != NSH_M_TYPE1_LEN) ||
+                (mdlen + NSH_BASE_HDR_LEN > size)) {
+                return ODP_FIT_ERROR;
+            }
+            memcpy(md1_dst, md1, mdlen);
+            break;
+        }
+        case OVS_NSH_KEY_ATTR_MD2: {
+            struct nsh_md2_tlv *md2_dst = &nsh_hdr->md2;
+            const uint8_t *md2 = nl_attr_get(a);
+            has_md2 = true;
+            mdlen = nl_attr_get_size(a);
+            if (mdlen + NSH_BASE_HDR_LEN > size) {
+                return ODP_FIT_ERROR;
+            }
+            memcpy(md2_dst, md2, mdlen);
+            break;
+        }
+        default:
+            /* Allow this to show up as unexpected, if there are unknown
+             * NSH attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
+            unknown = true;
+            break;
+        }
+    }
+
+    if (unknown) {
+        return ODP_FIT_TOO_MUCH;
+    }
+
+    if ((has_md1 && nsh_hdr->md_type != NSH_M_TYPE1)
+        || (has_md2 && nsh_hdr->md_type != NSH_M_TYPE2)) {
+        return ODP_FIT_ERROR;
+    }
+
+    /* nsh header length = NSH_BASE_HDR_LEN + mdlen */
+    nsh_set_flags_ttl_len(nsh_hdr, flags, ttl, NSH_BASE_HDR_LEN + mdlen);
+
+    return ODP_FIT_PERFECT;
+}
+
+/* Reports the error 'msg', which is formatted as with printf().
+ *
+ * If 'errorp' is nonnull, then the caller wants the error report to come
+ * directly back to it, so the function stores the error message into '*errorp'
+ * (after first freeing it in case there's something there already).
+ *
+ * Otherwise, logs the message at WARN level, rate-limited. */
+static void OVS_PRINTF_FORMAT(3, 4)
+odp_parse_error(struct vlog_rate_limit *rl, char **errorp,
+                const char *msg, ...)
+{
+    if (OVS_UNLIKELY(errorp)) {
+        free(*errorp);
+
+        va_list args;
+        va_start(args, msg);
+        *errorp = xvasprintf(msg, args);
+        va_end(args);
+    } else if (!VLOG_DROP_WARN(rl)) {
+        va_list args;
+        va_start(args, msg);
+        char *error = xvasprintf(msg, args);
+        va_end(args);
+
+        VLOG_WARN("%s", error);
+
+        free(error);
+    }
+}
+
+/* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
+ * returns fitness. If the attribute is a key, 'is_mask' should be false;
+ * if it is a mask, 'is_mask' should be true. If 'errorp' is nonnull and the
+ * function returns ODP_FIT_ERROR, stores a malloc()'d error message in
+ * '*errorp'. */
+static enum odp_key_fitness
+odp_nsh_key_from_attr__(const struct nlattr *attr, bool is_mask,
+                        struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask,
+                        char **errorp)
+{
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+    if (errorp) {
+        *errorp = NULL;
+    }
+
+    unsigned int left;
+    const struct nlattr *a;
+    bool unknown = false;
+    bool has_md1 = false;
+
+    NL_NESTED_FOR_EACH (a, left, attr) {
+        uint16_t type = nl_attr_type(a);
+        size_t len = nl_attr_get_size(a);
+        int expected_len = odp_key_attr_len(ovs_nsh_key_attr_lens,
+                                            OVS_NSH_KEY_ATTR_MAX, type);
+        if (expected_len) {
+            if (nsh_mask) {
+                expected_len *= 2;
+            }
+            if (len != expected_len) {
+                odp_parse_error(&rl, errorp, "NSH %s attribute %"PRIu16" "
+                                "should have length %d but actually has "
+                                "%"PRIuSIZE,
+                                nsh_mask ? "mask" : "key",
+                                type, expected_len, len);
+                return ODP_FIT_ERROR;
+            }
+        }
+
+        switch (type) {
+        case OVS_NSH_KEY_ATTR_UNSPEC:
+            break;
+        case OVS_NSH_KEY_ATTR_BASE: {
+            const struct ovs_nsh_key_base *base = nl_attr_get(a);
+            nsh->flags = base->flags;
+            nsh->ttl = base->ttl;
+            nsh->mdtype = base->mdtype;
+            nsh->np = base->np;
+            nsh->path_hdr = base->path_hdr;
+            if (nsh_mask && (len == 2 * sizeof(*base))) {
+                const struct ovs_nsh_key_base *base_mask = base + 1;
+                nsh_mask->flags = base_mask->flags;
+                nsh_mask->ttl = base_mask->ttl;
+                nsh_mask->mdtype = base_mask->mdtype;
+                nsh_mask->np = base_mask->np;
+                nsh_mask->path_hdr = base_mask->path_hdr;
+            }
+            break;
+        }
+        case OVS_NSH_KEY_ATTR_MD1: {
+            const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
+            has_md1 = true;
+            memcpy(nsh->context, md1->context, sizeof md1->context);
+            if (len == 2 * sizeof(*md1)) {
+                const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
+                memcpy(nsh_mask->context, md1_mask->context,
+                       sizeof(*md1_mask));
+            }
+            break;
+        }
+        case OVS_NSH_KEY_ATTR_MD2:
+        default:
+            /* Allow this to show up as unexpected, if there are unknown
+             * NSH attributes, eventually resulting in ODP_FIT_TOO_MUCH. */
+            unknown = true;
+            break;
+        }
+    }
+
+    if (unknown) {
+        return ODP_FIT_TOO_MUCH;
+    }
+
+    if (!is_mask && has_md1 && nsh->mdtype != NSH_M_TYPE1 && !nsh_mask) {
+        odp_parse_error(&rl, errorp, "OVS_NSH_KEY_ATTR_MD1 present but "
+                        "declared mdtype %"PRIu8" is not %d (NSH_M_TYPE1)",
+                        nsh->mdtype, NSH_M_TYPE1);
+        return ODP_FIT_ERROR;
+    }
+
+    return ODP_FIT_PERFECT;
+}
+
+/* Parses OVS_KEY_ATTR_NSH attribute 'attr' into 'nsh' and 'nsh_mask' and
+ * returns fitness. The attribute should be a key (not a mask). If 'errorp'
+ * is nonnull and the function returns ODP_FIT_ERROR, stores a malloc()'d error
+ * message in '*errorp'. */
+enum odp_key_fitness
+odp_nsh_key_from_attr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
+ struct ovs_key_nsh *nsh_mask, char **errorp)
+{
+ return odp_nsh_key_from_attr__(attr, false, nsh, nsh_mask, errorp);
+}
+
+/* Parses OVS_KEY_ATTR_TUNNEL attribute 'attr' into 'tun' and returns fitness.
+ * If the attribute is a key, 'is_mask' should be false; if it is a mask,
+ * 'is_mask' should be true. If 'errorp' is nonnull and the function returns
+ * ODP_FIT_ERROR, stores a malloc()'d error message in '*errorp'. */
static enum odp_key_fitness
odp_tun_key_from_attr__(const struct nlattr *attr, bool is_mask,
- struct flow_tnl *tun)
+ struct flow_tnl *tun, char **errorp)
{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
unsigned int left;
const struct nlattr *a;
bool ttl = false;
OVS_TUNNEL_ATTR_MAX, type);
if (len != expected_len && expected_len >= 0) {
+ odp_parse_error(&rl, errorp, "tunnel key attribute %"PRIu16" "
+ "should have length %d but actually has %"PRIuSIZE,
+ type, expected_len, len);
return ODP_FIT_ERROR;
}
struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
+ odp_parse_error(&rl, errorp, "error parsing VXLAN options");
return ODP_FIT_ERROR;
}
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
tun_metadata_from_geneve_nlattr(a, is_mask, tun);
break;
+ case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: {
+ const struct erspan_metadata *opts = nl_attr_get(a);
+
+ tun->erspan_ver = opts->version;
+ if (tun->erspan_ver == 1) {
+ tun->erspan_idx = ntohl(opts->u.index);
+ } else if (tun->erspan_ver == 2) {
+ tun->erspan_dir = opts->u.md2.dir;
+ tun->erspan_hwid = get_hwid(&opts->u.md2);
+ } else {
+ VLOG_WARN("%s invalid erspan version\n", __func__);
+ }
+ break;
+ }
+ case OVS_TUNNEL_KEY_ATTR_GTPU_OPTS: {
+ const struct gtpu_metadata *opts = nl_attr_get(a);
+
+ tun->gtpu_flags = opts->flags;
+ tun->gtpu_msgtype = opts->msgtype;
+ break;
+ }
default:
/* Allow this to show up as unexpected, if there are unknown
}
if (!ttl) {
+ odp_parse_error(&rl, errorp, "tunnel options missing TTL");
return ODP_FIT_ERROR;
}
if (unknown) {
return ODP_FIT_PERFECT;
}
+/* Parses OVS_KEY_ATTR_TUNNEL key attribute 'attr' into 'tun' and returns
+ * fitness. The attribute should be a key (not a mask). If 'errorp' is
+ * nonnull, stores NULL into '*errorp' on success, otherwise a malloc()'d error
+ * message. */
enum odp_key_fitness
-odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun)
+odp_tun_key_from_attr(const struct nlattr *attr, struct flow_tnl *tun,
+ char **errorp)
{
+ if (errorp) {
+ *errorp = NULL;
+ }
memset(tun, 0, sizeof *tun);
- return odp_tun_key_from_attr__(attr, false, tun);
+ return odp_tun_key_from_attr__(attr, false, tun, errorp);
}
static void
tun_key_to_attr(struct ofpbuf *a, const struct flow_tnl *tun_key,
                const struct flow_tnl *tun_flow_key,
-                const struct ofpbuf *key_buf)
+                const struct ofpbuf *key_buf, const char *tnl_type)
{
    size_t tun_key_ofs;
    if (tun_key->flags & FLOW_TNL_F_OAM) {
        nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
    }
-    if (tun_key->gbp_flags || tun_key->gbp_id) {
+
+    /* If 'tnl_type' names a particular type of output tunnel,
+     * only put that type's relevant tunnel metadata into the nlattr.
+     * If 'tnl_type' is NULL, put tunnel metadata according to the
+     * 'tun_key'.
+     */
+    if ((!tnl_type || !strcmp(tnl_type, "vxlan")) &&
+        (tun_key->gbp_flags || tun_key->gbp_id)) {
        size_t vxlan_opts_ofs;
        vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
                       (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
        nl_msg_end_nested(a, vxlan_opts_ofs);
    }
-    tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
-    nl_msg_end_nested(a, tun_key_ofs);
-}
+    if (!tnl_type || !strcmp(tnl_type, "geneve")) {
+        tun_metadata_to_geneve_nlattr(tun_key, tun_flow_key, key_buf, a);
+    }
-static bool
-odp_mask_attr_is_wildcard(const struct nlattr *ma)
-{
-    return is_all_zeros(nl_attr_get(ma), nl_attr_get_size(ma));
+    if ((!tnl_type || !strcmp(tnl_type, "erspan") ||
+        !strcmp(tnl_type, "ip6erspan")) &&
+        (tun_key->erspan_ver == 1 || tun_key->erspan_ver == 2)) {
+        struct erspan_metadata opts;
+
+        opts.version = tun_key->erspan_ver;
+        if (opts.version == 1) {
+            opts.u.index = htonl(tun_key->erspan_idx);
+        } else {
+            opts.u.md2.dir = tun_key->erspan_dir;
+            set_hwid(&opts.u.md2, tun_key->erspan_hwid);
+        }
+        nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
+                          &opts, sizeof(opts));
+    }
+
+    if ((!tnl_type || !strcmp(tnl_type, "gtpu")) &&
+        (tun_key->gtpu_flags && tun_key->gtpu_msgtype)) {
+        struct gtpu_metadata opts;
+
+        opts.flags = tun_key->gtpu_flags;
+        opts.msgtype = tun_key->gtpu_msgtype;
+        nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GTPU_OPTS,
+                          &opts, sizeof(opts));
+    }
    nl_msg_end_nested(a, tun_key_ofs);
}
static bool
-odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
+odp_mask_is_constant__(enum ovs_key_attr attr, const void *mask, size_t size,
+ int constant)
{
- if (attr == OVS_KEY_ATTR_TCP_FLAGS) {
- return TCP_FLAGS(*(ovs_be16 *)mask) == TCP_FLAGS(OVS_BE16_MAX);
- }
- if (attr == OVS_KEY_ATTR_IPV6) {
- const struct ovs_key_ipv6 *ipv6_mask = mask;
+ /* Convert 'constant' to all the widths we need. C conversion rules ensure
+ * that -1 becomes all-1-bits and 0 does not change. */
+ ovs_be16 be16 = (OVS_FORCE ovs_be16) constant;
+ uint32_t u32 = constant;
+ uint8_t u8 = constant;
+ const struct in6_addr *in6 = constant ? &in6addr_exact : &in6addr_any;
- return
- ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
- == htonl(IPV6_LABEL_MASK))
- && ipv6_mask->ipv6_proto == UINT8_MAX
- && ipv6_mask->ipv6_tclass == UINT8_MAX
- && ipv6_mask->ipv6_hlimit == UINT8_MAX
- && ipv6_mask->ipv6_frag == UINT8_MAX
- && ipv6_mask_is_exact(&ipv6_mask->ipv6_src)
- && ipv6_mask_is_exact(&ipv6_mask->ipv6_dst);
- }
- if (attr == OVS_KEY_ATTR_TUNNEL) {
+ switch (attr) {
+ case OVS_KEY_ATTR_UNSPEC:
+ case OVS_KEY_ATTR_ENCAP:
+ case __OVS_KEY_ATTR_MAX:
+ default:
return false;
+
+ case OVS_KEY_ATTR_PRIORITY:
+ case OVS_KEY_ATTR_IN_PORT:
+ case OVS_KEY_ATTR_ETHERNET:
+ case OVS_KEY_ATTR_VLAN:
+ case OVS_KEY_ATTR_ETHERTYPE:
+ case OVS_KEY_ATTR_IPV4:
+ case OVS_KEY_ATTR_TCP:
+ case OVS_KEY_ATTR_UDP:
+ case OVS_KEY_ATTR_ICMP:
+ case OVS_KEY_ATTR_ICMPV6:
+ case OVS_KEY_ATTR_ND:
+ case OVS_KEY_ATTR_ND_EXTENSIONS:
+ case OVS_KEY_ATTR_SKB_MARK:
+ case OVS_KEY_ATTR_TUNNEL:
+ case OVS_KEY_ATTR_SCTP:
+ case OVS_KEY_ATTR_DP_HASH:
+ case OVS_KEY_ATTR_RECIRC_ID:
+ case OVS_KEY_ATTR_MPLS:
+ case OVS_KEY_ATTR_CT_STATE:
+ case OVS_KEY_ATTR_CT_ZONE:
+ case OVS_KEY_ATTR_CT_MARK:
+ case OVS_KEY_ATTR_CT_LABELS:
+ case OVS_KEY_ATTR_PACKET_TYPE:
+ case OVS_KEY_ATTR_NSH:
+ return is_all_byte(mask, size, u8);
+
+ case OVS_KEY_ATTR_TCP_FLAGS:
+ return TCP_FLAGS(*(ovs_be16 *) mask) == TCP_FLAGS(be16);
+
+ case OVS_KEY_ATTR_IPV6: {
+ const struct ovs_key_ipv6 *ipv6_mask = mask;
+ return ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
+ == htonl(IPV6_LABEL_MASK & u32)
+ && ipv6_mask->ipv6_proto == u8
+ && ipv6_mask->ipv6_tclass == u8
+ && ipv6_mask->ipv6_hlimit == u8
+ && ipv6_mask->ipv6_frag == u8
+ && ipv6_addr_equals(&ipv6_mask->ipv6_src, in6)
+ && ipv6_addr_equals(&ipv6_mask->ipv6_dst, in6));
}
- if (attr == OVS_KEY_ATTR_ARP) {
- /* ARP key has padding, ignore it. */
- BUILD_ASSERT_DECL(sizeof(struct ovs_key_arp) == 24);
- BUILD_ASSERT_DECL(offsetof(struct ovs_key_arp, arp_tha) == 10 + 6);
- size = offsetof(struct ovs_key_arp, arp_tha) + ETH_ADDR_LEN;
- ovs_assert(((uint16_t *)mask)[size/2] == 0);
+ case OVS_KEY_ATTR_ARP:
+ return is_all_byte(mask, OFFSETOFEND(struct ovs_key_arp, arp_tha), u8);
+
+ case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
+ return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv4,
+ ipv4_proto), u8);
+
+ case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
+ return is_all_byte(mask, OFFSETOFEND(struct ovs_key_ct_tuple_ipv6,
+ ipv6_proto), u8);
}
+}
- return is_all_ones(mask, size);
+/* The caller must already have verified that 'ma' has a correct length.
+ *
+ * The main purpose of this function is formatting, to allow code to figure out
+ * whether the mask can be omitted. It doesn't try hard for attributes that
+ * contain sub-attributes, etc., because normally those would be broken down
+ * further for formatting. */
+static bool
+odp_mask_attr_is_wildcard(const struct nlattr *ma)
+{
+ return odp_mask_is_constant__(nl_attr_type(ma),
+ nl_attr_get(ma), nl_attr_get_size(ma), 0);
}
+/* The caller must already have verified that 'size' is a correct length for
+ * 'attr'.
+ *
+ * The main purpose of this function is formatting, to allow code to figure out
+ * whether the mask can be omitted. It doesn't try hard for attributes that
+ * contain sub-attributes, etc., because normally those would be broken down
+ * further for formatting. */
+static bool
+odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
+{
+ return odp_mask_is_constant__(attr, mask, size, -1);
+}
+
+/* The caller must already have verified that 'ma' has a correct length. */
static bool
odp_mask_attr_is_exact(const struct nlattr *ma)
{
enum ovs_key_attr attr = nl_attr_type(ma);
- const void *mask;
- size_t size;
-
- if (attr == OVS_KEY_ATTR_TUNNEL) {
- return false;
- } else {
- mask = nl_attr_get(ma);
- size = nl_attr_get_size(ma);
- }
-
- return odp_mask_is_exact(attr, mask, size);
+ return odp_mask_is_exact(attr, nl_attr_get(ma), nl_attr_get_size(ma));
}
void
}
}
+
static void
format_be64(struct ds *ds, const char *name, ovs_be64 key,
const ovs_be64 *mask, bool verbose)
static bool
check_attr_len(struct ds *ds, const struct nlattr *a, const struct nlattr *ma,
- const struct attr_len_tbl tbl[], int max_len, bool need_key)
+ const struct attr_len_tbl tbl[], int max_type, bool need_key)
{
int expected_len;
- expected_len = odp_key_attr_len(tbl, max_len, nl_attr_type(a));
+ expected_len = odp_key_attr_len(tbl, max_type, nl_attr_type(a));
if (expected_len != ATTR_LEN_VARIABLE &&
expected_len != ATTR_LEN_NESTED) {
ofpbuf_uninit(&ofp);
}
+static void
+format_odp_tun_erspan_opt(const struct nlattr *attr,
+ const struct nlattr *mask_attr, struct ds *ds,
+ bool verbose)
+{
+ const struct erspan_metadata *opts, *mask;
+ uint8_t ver, ver_ma, dir, dir_ma, hwid, hwid_ma;
+
+ opts = nl_attr_get(attr);
+ mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
+
+ ver = (uint8_t)opts->version;
+ if (mask) {
+ ver_ma = (uint8_t)mask->version;
+ }
+
+ format_u8u(ds, "ver", ver, mask ? &ver_ma : NULL, verbose);
+
+ if (opts->version == 1) {
+ if (mask) {
+ ds_put_format(ds, "idx=%#"PRIx32"/%#"PRIx32",",
+ ntohl(opts->u.index),
+ ntohl(mask->u.index));
+ } else {
+ ds_put_format(ds, "idx=%#"PRIx32",", ntohl(opts->u.index));
+ }
+ } else if (opts->version == 2) {
+ dir = opts->u.md2.dir;
+ hwid = opts->u.md2.hwid;
+ if (mask) {
+ dir_ma = mask->u.md2.dir;
+ hwid_ma = mask->u.md2.hwid;
+ }
+
+ format_u8u(ds, "dir", dir, mask ? &dir_ma : NULL, verbose);
+ format_u8x(ds, "hwid", hwid, mask ? &hwid_ma : NULL, verbose);
+ }
+ ds_chomp(ds, ',');
+}
+
+static void
+format_odp_tun_gtpu_opt(const struct nlattr *attr,
+ const struct nlattr *mask_attr, struct ds *ds,
+ bool verbose)
+{
+ const struct gtpu_metadata *opts, *mask;
+
+ opts = nl_attr_get(attr);
+ mask = mask_attr ? nl_attr_get(mask_attr) : NULL;
+
+ format_u8x(ds, "flags", opts->flags, mask ? &mask->flags : NULL, verbose);
+ format_u8u(ds, "msgtype", opts->msgtype, mask ? &mask->msgtype : NULL,
+ verbose);
+ ds_chomp(ds, ',');
+}
+
#define MASK(PTR, FIELD) PTR ? &PTR->FIELD : NULL
static void
format_geneve_opts(opt, mask, opts_len, ds, verbose);
}
+static void
+format_odp_nsh_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
+ struct ds *ds)
+{
+ unsigned int left;
+ const struct nlattr *a;
+ struct ovs_key_nsh nsh;
+ struct ovs_key_nsh nsh_mask;
+
+ memset(&nsh, 0, sizeof nsh);
+ memset(&nsh_mask, 0xff, sizeof nsh_mask);
+
+ NL_NESTED_FOR_EACH (a, left, attr) {
+ enum ovs_nsh_key_attr type = nl_attr_type(a);
+ const struct nlattr *ma = NULL;
+
+ if (mask_attr) {
+ ma = nl_attr_find__(nl_attr_get(mask_attr),
+ nl_attr_get_size(mask_attr), type);
+ }
+
+ if (!check_attr_len(ds, a, ma, ovs_nsh_key_attr_lens,
+ OVS_NSH_KEY_ATTR_MAX, true)) {
+ continue;
+ }
+
+ switch (type) {
+ case OVS_NSH_KEY_ATTR_UNSPEC:
+ break;
+ case OVS_NSH_KEY_ATTR_BASE: {
+ const struct ovs_nsh_key_base *base = nl_attr_get(a);
+ const struct ovs_nsh_key_base *base_mask
+ = ma ? nl_attr_get(ma) : NULL;
+ nsh.flags = base->flags;
+ nsh.ttl = base->ttl;
+ nsh.mdtype = base->mdtype;
+ nsh.np = base->np;
+ nsh.path_hdr = base->path_hdr;
+ if (base_mask) {
+ nsh_mask.flags = base_mask->flags;
+ nsh_mask.ttl = base_mask->ttl;
+ nsh_mask.mdtype = base_mask->mdtype;
+ nsh_mask.np = base_mask->np;
+ nsh_mask.path_hdr = base_mask->path_hdr;
+ }
+ break;
+ }
+ case OVS_NSH_KEY_ATTR_MD1: {
+ const struct ovs_nsh_key_md1 *md1 = nl_attr_get(a);
+ const struct ovs_nsh_key_md1 *md1_mask
+ = ma ? nl_attr_get(ma) : NULL;
+ memcpy(nsh.context, md1->context, sizeof md1->context);
+ if (md1_mask) {
+ memcpy(nsh_mask.context, md1_mask->context,
+ sizeof md1_mask->context);
+ }
+ break;
+ }
+ case OVS_NSH_KEY_ATTR_MD2:
+ case __OVS_NSH_KEY_ATTR_MAX:
+ default:
+ /* No support for matching other metadata formats yet. */
+ break;
+ }
+ }
+
+ if (mask_attr) {
+ format_nsh_key_mask(ds, &nsh, &nsh_mask);
+ } else {
+ format_nsh_key(ds, &nsh);
+ }
+}
+
static void
format_odp_tun_attr(const struct nlattr *attr, const struct nlattr *mask_attr,
struct ds *ds, bool verbose)
case OVS_TUNNEL_KEY_ATTR_ID:
format_be64(ds, "tun_id", nl_attr_get_be64(a),
ma ? nl_attr_get(ma) : NULL, verbose);
- flags |= FLOW_TNL_F_KEY;
+ flags |= FLOW_TNL_F_KEY;
if (ma) {
mask_flags |= FLOW_TNL_F_KEY;
}
ma ? nl_attr_get(ma) : NULL, verbose);
break;
case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
- flags |= FLOW_TNL_F_DONT_FRAGMENT;
+ flags |= FLOW_TNL_F_DONT_FRAGMENT;
break;
case OVS_TUNNEL_KEY_ATTR_CSUM:
- flags |= FLOW_TNL_F_CSUM;
+ flags |= FLOW_TNL_F_CSUM;
break;
case OVS_TUNNEL_KEY_ATTR_TP_SRC:
format_be16(ds, "tp_src", nl_attr_get_be16(a),
ma ? nl_attr_get(ma) : NULL, verbose);
break;
case OVS_TUNNEL_KEY_ATTR_OAM:
- flags |= FLOW_TNL_F_OAM;
+ flags |= FLOW_TNL_F_OAM;
break;
case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
ds_put_cstr(ds, "vxlan(");
break;
case OVS_TUNNEL_KEY_ATTR_PAD:
break;
+ case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS:
+ ds_put_cstr(ds, "erspan(");
+ format_odp_tun_erspan_opt(a, ma, ds, verbose);
+ ds_put_cstr(ds, "),");
+ break;
+ case OVS_TUNNEL_KEY_ATTR_GTPU_OPTS:
+ ds_put_cstr(ds, "gtpu(");
+ format_odp_tun_gtpu_opt(a, ma, ds, verbose);
+ ds_put_cstr(ds, "),");
+ break;
case __OVS_TUNNEL_KEY_ATTR_MAX:
default:
format_unknown_key(ds, a, ma);
static void
format_frag(struct ds *ds, const char *name, uint8_t key,
- const uint8_t *mask, bool verbose)
+ const uint8_t *mask, bool verbose OVS_UNUSED)
{
bool mask_empty = mask && !*mask;
+ bool mask_full = !mask || *mask == UINT8_MAX;
/* ODP frag is an enumeration field; partial masks are not meaningful. */
- if (verbose || !mask_empty) {
- bool mask_full = !mask || *mask == UINT8_MAX;
-
- if (!mask_full) { /* Partially masked. */
- ds_put_format(ds, "error: partial mask not supported for frag (%#"
- PRIx8"),", *mask);
- } else {
- ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
- }
+ if (!mask_empty && !mask_full) {
+ ds_put_format(ds, "error: partial mask not supported for frag (%#"
+ PRIx8"),", *mask);
+ } else if (!mask_empty) {
+ ds_put_format(ds, "%s=%s,", name, ovs_frag_type_to_string(key));
}
}
return is_all_zeros(mask, n);
}
+/* The caller must have already verified that 'a' and 'ma' have correct
+ * lengths. */
static void
-format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
- const struct hmap *portno_names, struct ds *ds,
- bool verbose)
+format_odp_key_attr__(const struct nlattr *a, const struct nlattr *ma,
+ const struct hmap *portno_names, struct ds *ds,
+ bool verbose)
{
enum ovs_key_attr attr = nl_attr_type(a);
char namebuf[OVS_KEY_ATTR_BUFSIZE];
ds_put_cstr(ds, ovs_key_attr_to_string(attr, namebuf, sizeof namebuf));
- if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
- OVS_KEY_ATTR_MAX, false)) {
- return;
- }
-
ds_put_char(ds, '(');
switch (attr) {
case OVS_KEY_ATTR_ENCAP:
ds_chomp(ds, ',');
break;
}
+ case OVS_KEY_ATTR_ND_EXTENSIONS: {
+ const struct ovs_key_nd_extensions *mask = ma ? nl_attr_get(ma) : NULL;
+ const struct ovs_key_nd_extensions *key = nl_attr_get(a);
+
+ bool first = true;
+ format_be32_masked(ds, &first, "nd_reserved", key->nd_reserved,
+ OVS_BE32_MAX);
+ ds_put_char(ds, ',');
+
+ format_u8u(ds, "nd_options_type", key->nd_options_type,
+ MASK(mask, nd_options_type), verbose);
+
+ ds_chomp(ds, ',');
+ break;
+ }
+ case OVS_KEY_ATTR_NSH: {
+ format_odp_nsh_attr(a, ma, ds);
+ break;
+ }
case OVS_KEY_ATTR_UNSPEC:
case __OVS_KEY_ATTR_MAX:
default:
ds_put_char(ds, ')');
}
+static void
+format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
+ const struct hmap *portno_names, struct ds *ds,
+ bool verbose)
+{
+ if (check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
+ OVS_KEY_ATTR_MAX, false)) {
+ format_odp_key_attr__(a, ma, portno_names, ds, verbose);
+ }
+}
+
static struct nlattr *
generate_all_wildcard_mask(const struct attr_len_tbl tbl[], int max,
struct ofpbuf *ofp, const struct nlattr *key)
size_t nested_mask;
if (tbl[type].next) {
- tbl = tbl[type].next;
- max = tbl[type].next_max;
+ const struct attr_len_tbl *entry = &tbl[type];
+ tbl = entry->next;
+ max = entry->next_max;
}
nested_mask = nl_msg_start_nested(ofp, type);
error = parse_int_string(s, (uint8_t *)&be_mask,
sizeof be_mask, &s);
if (error) {
- return error;
+ return 0;
}
*mask = ntoh128(be_mask);
} else {
const struct nlattr *a;
unsigned int left;
bool has_ethtype_key = false;
- const struct nlattr *ma = NULL;
+ bool has_packet_type_key = false;
struct ofpbuf ofp;
bool first_field = true;
ofpbuf_init(&ofp, 100);
NL_ATTR_FOR_EACH (a, left, key, key_len) {
+ int attr_type = nl_attr_type(a);
+ const struct nlattr *ma = (mask && mask_len
+ ? nl_attr_find__(mask, mask_len,
+ attr_type)
+ : NULL);
+ if (!check_attr_len(ds, a, ma, ovs_flow_key_attr_lens,
+ OVS_KEY_ATTR_MAX, false)) {
+ continue;
+ }
+
bool is_nested_attr;
bool is_wildcard = false;
- int attr_type = nl_attr_type(a);
if (attr_type == OVS_KEY_ATTR_ETHERTYPE) {
has_ethtype_key = true;
+ } else if (attr_type == OVS_KEY_ATTR_PACKET_TYPE) {
+ has_packet_type_key = true;
}
is_nested_attr = odp_key_attr_len(ovs_flow_key_attr_lens,
if (!first_field) {
ds_put_char(ds, ',');
}
- format_odp_key_attr(a, ma, portno_names, ds, verbose);
+ format_odp_key_attr__(a, ma, portno_names, ds, verbose);
first_field = false;
+ } else if (attr_type == OVS_KEY_ATTR_ETHERNET
+ && !has_packet_type_key) {
+ /* This special case reflects differences between the kernel
+ * and userspace datapaths regarding the root type of the
+ * packet being matched (typically Ethernet but some tunnels
+ * can encapsulate IPv4 etc.). The kernel datapath does not
+ * have an explicit way to indicate packet type; instead:
+ *
+ * - If OVS_KEY_ATTR_ETHERNET is present, the packet is an
+ * Ethernet packet and OVS_KEY_ATTR_ETHERTYPE is the
+ * Ethertype encoded in the Ethernet header.
+ *
+ * - If OVS_KEY_ATTR_ETHERNET is absent, then the packet's
+ * root type is that encoded in OVS_KEY_ATTR_ETHERTYPE
+ * (i.e. if OVS_KEY_ATTR_ETHERTYPE is 0x0800 then the
+ * packet is an IPv4 packet).
+ *
+ * Thus, if OVS_KEY_ATTR_ETHERNET is present, even if it is
+ * all-wildcarded, it is important to print it.
+ *
+ * On the other hand, the userspace datapath supports
+ * OVS_KEY_ATTR_PACKET_TYPE and uses it to indicate the packet
+ * type. Thus, if OVS_KEY_ATTR_PACKET_TYPE is present, we need
+ * not print an all-wildcarded OVS_KEY_ATTR_ETHERNET. */
+ if (!first_field) {
+ ds_put_char(ds, ',');
+ }
+ ds_put_cstr(ds, "eth()");
}
ofpbuf_clear(&ofp);
}
ds_put_char(ds, ')');
}
if (!has_ethtype_key) {
- ma = nl_attr_find__(mask, mask_len, OVS_KEY_ATTR_ETHERTYPE);
+ const struct nlattr *ma = nl_attr_find__(mask, mask_len,
+ OVS_KEY_ATTR_ETHERTYPE);
if (ma) {
ds_put_format(ds, ",eth_type(0/0x%04"PRIx16")",
ntohs(nl_attr_get_be16(ma)));
return 0;
}
+static int
+scan_be32(const char *s, ovs_be32 *key, ovs_be32 *mask)
+{
+ uint32_t key_, mask_;
+ int n;
+
+ if (ovs_scan(s, "%"SCNi32"%n", &key_, &n)) {
+ int len = n;
+
+ *key = htonl(key_);
+ if (mask) {
+ if (ovs_scan(s + len, "/%"SCNi32"%n", &mask_, &n)) {
+ len += n;
+ *mask = htonl(mask_);
+ } else {
+ *mask = OVS_BE32_MAX;
+ }
+ }
+ return len;
+ }
+ return 0;
+}
+
static int
scan_be64(const char *s, ovs_be64 *key, ovs_be64 *mask)
{
const char *s_base = s;
ovs_be16 id = 0, id_mask = 0;
uint8_t flags = 0, flags_mask = 0;
+ int len;
if (!strncmp(s, "id=", 3)) {
s += 3;
- s += scan_be16(s, &id, mask ? &id_mask : NULL);
+ len = scan_be16(s, &id, mask ? &id_mask : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
}
if (s[0] == ',') {
}
if (!strncmp(s, "flags=", 6)) {
s += 6;
- s += scan_u8(s, &flags, mask ? &flags_mask : NULL);
+ len = scan_u8(s, &flags, mask ? &flags_mask : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
}
if (!strncmp(s, "))", 2)) {
return 0;
}
+/* Parses GTP-U tunnel metadata of the form "flags=...,msgtype=...)" from
+ * 's' into '*key' and, if 'mask' is nonnull, '*mask'.  Both fields are
+ * optional.  Returns the number of characters consumed (0 on a field parse
+ * failure).  Note that 'key'/'mask' are written only once the closing ')'
+ * is reached; if ')' is missing, the characters consumed so far are still
+ * reported without storing anything. */
+static int
+scan_gtpu_metadata(const char *s,
+ struct gtpu_metadata *key,
+ struct gtpu_metadata *mask)
+{
+ const char *s_base = s;
+ uint8_t flags = 0, flags_ma = 0;
+ uint8_t msgtype = 0, msgtype_ma = 0;
+ int len;
+
+ if (!strncmp(s, "flags=", 6)) {
+ s += 6;
+ len = scan_u8(s, &flags, mask ? &flags_ma : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
+ }
+
+ if (s[0] == ',') {
+ s++;
+ }
+
+ if (!strncmp(s, "msgtype=", 8)) {
+ s += 8;
+ len = scan_u8(s, &msgtype, mask ? &msgtype_ma : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
+ }
+
+ if (!strncmp(s, ")", 1)) {
+ s += 1;
+ key->flags = flags;
+ key->msgtype = msgtype;
+ if (mask) {
+ mask->flags = flags_ma;
+ mask->msgtype = msgtype_ma;
+ }
+ }
+ return s - s_base;
+}
+
+/* Parses ERSPAN tunnel metadata from 's' into '*key'/'*mask': for version 1
+ * the form is "ver=1,idx=...)"; for version 2 it is
+ * "ver=2,dir=...,hwid=...)".  Returns the number of characters consumed,
+ * or 0 on a field parse failure or an unrecognized version.
+ *
+ * NOTE(review): when 'mask' is nonnull only the union members are written
+ * ('u.index' or 'u.md2'); 'mask->version' is left untouched here — confirm
+ * that callers pre-initialize the mask. */
+static int
+scan_erspan_metadata(const char *s,
+ struct erspan_metadata *key,
+ struct erspan_metadata *mask)
+{
+ const char *s_base = s;
+ uint32_t idx = 0, idx_mask = 0;
+ uint8_t ver = 0, dir = 0, hwid = 0;
+ uint8_t ver_mask = 0, dir_mask = 0, hwid_mask = 0;
+ int len;
+
+ if (!strncmp(s, "ver=", 4)) {
+ s += 4;
+ len = scan_u8(s, &ver, mask ? &ver_mask : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
+ }
+
+ if (s[0] == ',') {
+ s++;
+ }
+
+ if (ver == 1) {
+ if (!strncmp(s, "idx=", 4)) {
+ s += 4;
+ len = scan_u32(s, &idx, mask ? &idx_mask : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
+ }
+
+ if (!strncmp(s, ")", 1)) {
+ s += 1;
+ key->version = ver;
+ key->u.index = htonl(idx);
+ if (mask) {
+ mask->u.index = htonl(idx_mask);
+ }
+ }
+ return s - s_base;
+
+ } else if (ver == 2) {
+ if (!strncmp(s, "dir=", 4)) {
+ s += 4;
+ len = scan_u8(s, &dir, mask ? &dir_mask : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
+ }
+ if (s[0] == ',') {
+ s++;
+ }
+ if (!strncmp(s, "hwid=", 5)) {
+ s += 5;
+ len = scan_u8(s, &hwid, mask ? &hwid_mask : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
+ }
+
+ if (!strncmp(s, ")", 1)) {
+ s += 1;
+ key->version = ver;
+ key->u.md2.hwid = hwid;
+ key->u.md2.dir = dir;
+ if (mask) {
+ mask->u.md2.hwid = hwid_mask;
+ mask->u.md2.dir = dir_mask;
+ }
+ }
+ return s - s_base;
+ }
+
+ return 0;
+}
+
static int
scan_geneve(const char *s, struct geneve_scan *key, struct geneve_scan *mask)
{
struct geneve_opt *opt = key->d;
struct geneve_opt *opt_mask = mask ? mask->d : NULL;
int len_remain = sizeof key->d;
+ int len;
while (s[0] == '{' && len_remain >= sizeof *opt) {
int data_len = 0;
if (!strncmp(s, "class=", 6)) {
s += 6;
- s += scan_be16(s, &opt->opt_class,
- mask ? &opt_mask->opt_class : NULL);
+ len = scan_be16(s, &opt->opt_class,
+ mask ? &opt_mask->opt_class : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
} else if (mask) {
memset(&opt_mask->opt_class, 0, sizeof opt_mask->opt_class);
}
}
if (!strncmp(s, "type=", 5)) {
s += 5;
- s += scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
+ len = scan_u8(s, &opt->type, mask ? &opt_mask->type : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
} else if (mask) {
memset(&opt_mask->type, 0, sizeof opt_mask->type);
}
if (!strncmp(s, "len=", 4)) {
uint8_t opt_len, opt_len_mask;
s += 4;
- s += scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
+ len = scan_u8(s, &opt_len, mask ? &opt_len_mask : NULL);
+ if (len == 0) {
+ return 0;
+ }
+ s += len;
if (opt_len > 124 || opt_len % 4 || opt_len > len_remain) {
return 0;
if (s[0] == ',') {
s++;
+ if (parse_int_string(s, (uint8_t *)(opt + 1),
+ data_len, (char **)&s)) {
+ return 0;
+ }
}
- if (parse_int_string(s, (uint8_t *)(opt + 1), data_len, (char **)&s)) {
- return 0;
- }
-
if (mask) {
if (s[0] == '/') {
s++;
opt_mask += 1 + data_len / 4;
}
len_remain -= data_len;
+ } else {
+ return 0;
}
}
if (s[0] == ')') {
- int len = sizeof key->d - len_remain;
+ len = sizeof key->d - len_remain;
s++;
key->len = len;
geneve->len);
}
+/* Appends 'data_', which must point to a struct erspan_metadata, to 'a' as
+ * an OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS attribute.  (The void pointer matches
+ * the generic SCAN_FIELD_NESTED_FUNC callback signature.) */
+static void
+erspan_to_attr(struct ofpbuf *a, const void *data_)
+{
+ const struct erspan_metadata *md = data_;
+
+ nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, md,
+ sizeof *md);
+}
+
+/* Appends 'data_', which must point to a struct gtpu_metadata, to 'a' as an
+ * OVS_TUNNEL_KEY_ATTR_GTPU_OPTS attribute.  (The void pointer matches the
+ * generic SCAN_FIELD_NESTED_FUNC callback signature.) */
+static void
+gtpu_to_attr(struct ofpbuf *a, const void *data_)
+{
+ const struct gtpu_metadata *md = data_;
+
+ nl_msg_put_unspec(a, OVS_TUNNEL_KEY_ATTR_GTPU_OPTS, md,
+ sizeof *md);
+}
+
#define SCAN_PUT_ATTR(BUF, ATTR, DATA, FUNC) \
{ \
unsigned long call_fn = (unsigned long)FUNC; \
/* Beginning of nested attribute. */
#define SCAN_BEGIN_NESTED(NAME, ATTR) \
SCAN_IF(NAME); \
- size_t key_offset, mask_offset; \
+ size_t key_offset, mask_offset = 0; \
key_offset = nl_msg_start_nested(key, ATTR); \
if (mask) { \
mask_offset = nl_msg_start_nested(mask, ATTR); \
do { \
len = 0;
-#define SCAN_END_NESTED() \
- SCAN_FINISH(); \
- nl_msg_end_nested(key, key_offset); \
- if (mask) { \
- nl_msg_end_nested(mask, mask_offset); \
- } \
- return s - start; \
+#define SCAN_END_NESTED() \
+ SCAN_FINISH(); \
+ if (nl_attr_oversized(key->size - key_offset - NLA_HDRLEN)) { \
+ return -E2BIG; \
+ } \
+ nl_msg_end_nested(key, key_offset); \
+ if (mask) { \
+ nl_msg_end_nested(mask, mask_offset); \
+ } \
+ return s - start; \
}
#define SCAN_FIELD_NESTED__(NAME, TYPE, SCAN_AS, ATTR, FUNC) \
/* scan_port needs one extra argument. */
#define SCAN_SINGLE_PORT(NAME, TYPE, ATTR) \
SCAN_BEGIN(NAME, TYPE) { \
- len = scan_port(s, &skey, &smask, port_names); \
+ len = scan_port(s, &skey, &smask, \
+ context->port_names); \
if (len == 0) { \
return -EINVAL; \
} \
} SCAN_END_SINGLE(ATTR)
+/* If 's' begins with "nsh(", parses the comma-separated NSH fields that
+ * follow (flags, mdtype, np, spi, si, c1..c4, each optionally with a
+ * "/mask" suffix) up to the closing ')'.  On success, appends the
+ * resulting OVS_KEY_ATTR_NSH attribute to 'key' (and, if 'mask' is
+ * nonnull, a corresponding mask attribute to 'mask') and returns the
+ * number of characters consumed.  Returns 0 if 's' does not start with
+ * "nsh(" and -EINVAL on a malformed field or missing ')'. */
static int
-parse_odp_key_mask_attr(const char *s, const struct simap *port_names,
+parse_odp_nsh_key_mask_attr(const char *s, struct ofpbuf *key,
+ struct ofpbuf *mask)
+{
+ if (strncmp(s, "nsh(", 4) == 0) {
+ const char *start = s;
+ int len;
+ struct ovs_key_nsh skey, smask;
+ uint32_t spi = 0, spi_mask = 0;
+ uint8_t si = 0, si_mask = 0;
+
+ s += 4;
+
+ memset(&skey, 0, sizeof skey);
+ memset(&smask, 0, sizeof smask);
+ /* Each iteration consumes one "name=value[/mask]" field; the loop
+ * ends when the separator after a field is not ','. */
+ do {
+ len = 0;
+
+ if (strncmp(s, "flags=", 6) == 0) {
+ s += 6;
+ len = scan_u8(s, &skey.flags, mask ? &smask.flags : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "mdtype=", 7) == 0) {
+ s += 7;
+ len = scan_u8(s, &skey.mdtype, mask ? &smask.mdtype : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "np=", 3) == 0) {
+ s += 3;
+ len = scan_u8(s, &skey.np, mask ? &smask.np : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "spi=", 4) == 0) {
+ s += 4;
+ len = scan_u32(s, &spi, mask ? &spi_mask : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "si=", 3) == 0) {
+ s += 3;
+ len = scan_u8(s, &si, mask ? &si_mask : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "c1=", 3) == 0) {
+ s += 3;
+ len = scan_be32(s, &skey.context[0],
+ mask ? &smask.context[0] : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "c2=", 3) == 0) {
+ s += 3;
+ len = scan_be32(s, &skey.context[1],
+ mask ? &smask.context[1] : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "c3=", 3) == 0) {
+ s += 3;
+ len = scan_be32(s, &skey.context[2],
+ mask ? &smask.context[2] : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+
+ if (strncmp(s, "c4=", 3) == 0) {
+ s += 3;
+ len = scan_be32(s, &skey.context[3],
+ mask ? &smask.context[3] : NULL);
+ if (len == 0) {
+ return -EINVAL;
+ }
+ s += len;
+ continue;
+ }
+ } while (*s++ == ',' && len != 0);
+ /* The character that ended the loop must be the closing ')'. */
+ if (s[-1] != ')') {
+ return -EINVAL;
+ }
+
+ /* SPI and SI are carried together in the NSH path header. */
+ skey.path_hdr = nsh_spi_si_to_path_hdr(spi, si);
+ smask.path_hdr = nsh_spi_si_to_path_hdr(spi_mask, si_mask);
+
+ nsh_key_to_attr(key, &skey, NULL, 0, false);
+ if (mask) {
+ nsh_key_to_attr(mask, &smask, NULL, 0, true);
+ }
+ return s - start;
+ }
+ return 0;
+}
+
+/* Depth-limiting wrapper around parse_odp_key_mask_attr__(): tracks the
+ * current nesting depth in 'context' and fails with -EINVAL once the depth
+ * reaches MAX_ODP_NESTED, so that deeply nested odp flow strings (e.g.
+ * repeated "encap(...)") cannot recurse without bound. */
+static int
+parse_odp_key_mask_attr(struct parse_odp_context *context, const char *s,
 struct ofpbuf *key, struct ofpbuf *mask)
 {
- ovs_u128 ufid;
- int len;
+ int retval;
- /* Skip UFID. */
- len = odp_ufid_from_string(s, &ufid);
- if (len) {
- return len;
+ context->depth++;
+
+ if (context->depth == MAX_ODP_NESTED) {
+ retval = -EINVAL;
+ } else {
+ retval = parse_odp_key_mask_attr__(context, s, key, mask);
 }
+ /* Balance the increment on every path so the caller's depth is
+ * unchanged. */
+ context->depth--;
+
+ return retval;
+}
+
+static int
+parse_odp_key_mask_attr__(struct parse_odp_context *context, const char *s,
+ struct ofpbuf *key, struct ofpbuf *mask)
+{
SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
SCAN_FIELD_NESTED("ttl=", uint8_t, u8, OVS_TUNNEL_KEY_ATTR_TTL);
SCAN_FIELD_NESTED("tp_src=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_SRC);
SCAN_FIELD_NESTED("tp_dst=", ovs_be16, be16, OVS_TUNNEL_KEY_ATTR_TP_DST);
+ SCAN_FIELD_NESTED_FUNC("erspan(", struct erspan_metadata, erspan_metadata,
+ erspan_to_attr);
SCAN_FIELD_NESTED_FUNC("vxlan(gbp(", uint32_t, vxlan_gbp, vxlan_gbp_to_attr);
SCAN_FIELD_NESTED_FUNC("geneve(", struct geneve_scan, geneve,
geneve_to_attr);
+ SCAN_FIELD_NESTED_FUNC("gtpu(", struct gtpu_metadata, gtpu_metadata,
+ gtpu_to_attr);
SCAN_FIELD_NESTED_FUNC("flags(", uint16_t, tun_flags, tun_flags_to_attr);
} SCAN_END_NESTED();
SCAN_FIELD("tll=", eth, nd_tll);
} SCAN_END(OVS_KEY_ATTR_ND);
+ SCAN_BEGIN("nd_ext(", struct ovs_key_nd_extensions) {
+ SCAN_FIELD("nd_reserved=", be32, nd_reserved);
+ SCAN_FIELD("nd_options_type=", u8, nd_options_type);
+ } SCAN_END(OVS_KEY_ATTR_ND_EXTENSIONS);
+
struct packet_type {
ovs_be16 ns;
ovs_be16 id;
SCAN_FIELD("id=", be16, id);
} SCAN_END(OVS_KEY_ATTR_PACKET_TYPE);
+ /* nsh is nested, it needs special process */
+ int ret = parse_odp_nsh_key_mask_attr(s, key, mask);
+ if (ret < 0) {
+ return ret;
+ } else {
+ s += ret;
+ }
+
/* Encap open-coded. */
if (!strncmp(s, "encap(", 6)) {
const char *start = s;
break;
}
- retval = parse_odp_key_mask_attr(s, port_names, key, mask);
+ retval = parse_odp_key_mask_attr(context, s, key, mask);
if (retval < 0) {
return retval;
}
+
+ if (nl_attr_oversized(key->size - encap - NLA_HDRLEN)) {
+ return -E2BIG;
+ }
s += retval;
}
s++;
return -EINVAL;
}
-/* Parses the string representation of a datapath flow key, in the
- * format output by odp_flow_key_format(). Returns 0 if successful,
- * otherwise a positive errno value. On success, the flow key is
- * appended to 'key' as a series of Netlink attributes. On failure, no
- * data is appended to 'key'. Either way, 'key''s data might be
- * reallocated.
+/* Parses the string representation of a datapath flow key, in the format
+ * output by odp_flow_key_format(). Returns 0 if successful, otherwise a
+ * positive errno value. On success, stores NULL into '*errorp' and the flow
+ * key is appended to 'key' as a series of Netlink attributes. On failure,
+ * stores a malloc()'d error message in '*errorp' without changing the data in
+ * 'key'. Either way, 'key''s data might be reallocated.
*
* If 'port_names' is nonnull, it points to an simap that maps from a port name
* to a port number. (Port names may be used instead of port numbers in
* have duplicated keys. odp_flow_key_to_flow() will detect those errors. */
int
odp_flow_from_string(const char *s, const struct simap *port_names,
- struct ofpbuf *key, struct ofpbuf *mask)
+ struct ofpbuf *key, struct ofpbuf *mask,
+ char **errorp)
{
+ if (errorp) {
+ *errorp = NULL;
+ }
+
const size_t old_size = key->size;
+ struct parse_odp_context context = (struct parse_odp_context) {
+ .port_names = port_names,
+ };
for (;;) {
int retval;
return 0;
}
- retval = parse_odp_key_mask_attr(s, port_names, key, mask);
+ /* Skip UFID. */
+ ovs_u128 ufid;
+ retval = odp_ufid_from_string(s, &ufid);
+ if (retval < 0) {
+ if (errorp) {
+ *errorp = xasprintf("syntax error at %s", s);
+ }
+ key->size = old_size;
+ return -retval;
+ } else if (retval > 0) {
+ s += retval;
+ s += s[0] == ' ' ? 1 : 0;
+ }
+
+ retval = parse_odp_key_mask_attr(&context, s, key, mask);
if (retval < 0) {
+ if (errorp) {
+ *errorp = xasprintf("syntax error at %s", s);
+ }
key->size = old_size;
return -retval;
}
static void put_arp_key(const struct ovs_key_arp *, struct flow *);
static void get_nd_key(const struct flow *, struct ovs_key_nd *);
static void put_nd_key(const struct ovs_key_nd *, struct flow *);
+static void get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh,
+ bool is_mask);
+static void put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
+ bool is_mask);
/* These share the same layout. */
union ovs_key_tp {
odp_flow_key_from_flow__(const struct odp_flow_key_parms *parms,
bool export_mask, struct ofpbuf *buf)
{
+ /* New "struct flow" fields that are visible to the datapath (including all
+ * data fields) should be translated into equivalent datapath flow fields
+ * here (you will have to add a OVS_KEY_ATTR_* for them). */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
+
struct ovs_key_ethernet *eth_key;
size_t encap[FLOW_MAX_VLAN_HEADERS] = {0};
size_t max_vlans;
nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, data->skb_priority);
- if (flow_tnl_dst_is_set(&flow->tunnel) || export_mask) {
+ if (flow_tnl_dst_is_set(&flow->tunnel) ||
+ flow_tnl_src_is_set(&flow->tunnel) || export_mask) {
tun_key_to_attr(buf, &data->tunnel, &parms->flow->tunnel,
- parms->key_buf);
+ parms->key_buf, NULL);
}
nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, data->pkt_mark);
nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_LABELS, &data->ct_label,
sizeof(data->ct_label));
}
- if (parms->support.ct_orig_tuple && flow->ct_nw_proto) {
- if (flow->dl_type == htons(ETH_TYPE_IP)) {
- struct ovs_key_ct_tuple_ipv4 ct = {
- data->ct_nw_src,
- data->ct_nw_dst,
- data->ct_tp_src,
- data->ct_tp_dst,
- data->ct_nw_proto,
- };
- nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, &ct,
- sizeof ct);
- } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
- struct ovs_key_ct_tuple_ipv6 ct = {
- data->ct_ipv6_src,
- data->ct_ipv6_dst,
- data->ct_tp_src,
- data->ct_tp_dst,
- data->ct_nw_proto,
- };
- nl_msg_put_unspec(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, &ct,
- sizeof ct);
+ if (flow->ct_nw_proto) {
+ if (parms->support.ct_orig_tuple
+ && flow->dl_type == htons(ETH_TYPE_IP)) {
+ struct ovs_key_ct_tuple_ipv4 *ct;
+
+ /* 'struct ovs_key_ct_tuple_ipv4' has padding, clear it. */
+ ct = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
+ sizeof *ct);
+ ct->ipv4_src = data->ct_nw_src;
+ ct->ipv4_dst = data->ct_nw_dst;
+ ct->src_port = data->ct_tp_src;
+ ct->dst_port = data->ct_tp_dst;
+ ct->ipv4_proto = data->ct_nw_proto;
+ } else if (parms->support.ct_orig_tuple6
+ && flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ struct ovs_key_ct_tuple_ipv6 *ct;
+
+ /* 'struct ovs_key_ct_tuple_ipv6' has padding, clear it. */
+ ct = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
+ sizeof *ct);
+ ct->ipv6_src = data->ct_ipv6_src;
+ ct->ipv6_dst = data->ct_ipv6_dst;
+ ct->src_port = data->ct_tp_src;
+ ct->dst_port = data->ct_tp_dst;
+ ct->ipv6_proto = data->ct_nw_proto;
}
}
if (parms->support.recirc) {
for (i = 0; i < n; i++) {
mpls_key[i].mpls_lse = data->mpls_lse[i];
}
+ } else if (flow->dl_type == htons(ETH_TYPE_NSH)) {
+ nsh_key_to_attr(buf, &data->nsh, NULL, 0, export_mask);
}
if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
sizeof *tcp_key);
get_tp_key(data, tcp_key);
- if (data->tcp_flags) {
+ if (data->tcp_flags || (mask && mask->tcp_flags)) {
nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
}
} else if (flow->nw_proto == IPPROTO_UDP) {
* xlate_wc_finish() for details. */
&& (!export_mask || (data->tp_src == htons(0xff)
&& data->tp_dst == htons(0xff)))) {
-
struct ovs_key_nd *nd_key;
-
nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
sizeof *nd_key);
nd_key->nd_target = data->nd_target;
nd_key->nd_sll = data->arp_sha;
nd_key->nd_tll = data->arp_tha;
+
+ /* Add ND Extensions Attr only if supported and reserved field
+ * or options type is set. */
+ if (parms->support.nd_ext) {
+ struct ovs_key_nd_extensions *nd_ext_key;
+
+ if (data->igmp_group_ip4 != 0 || data->tcp_flags != 0) {
+ /* 'struct ovs_key_nd_extensions' has padding,
+ * clear it. */
+ nd_ext_key = nl_msg_put_unspec_zero(buf,
+ OVS_KEY_ATTR_ND_EXTENSIONS,
+ sizeof *nd_ext_key);
+ nd_ext_key->nd_reserved = data->igmp_group_ip4;
+ nd_ext_key->nd_options_type = ntohs(data->tcp_flags);
+ }
+ }
}
}
}
nl_msg_put_u32(buf, OVS_KEY_ATTR_PRIORITY, md->skb_priority);
+ if (md->dp_hash) {
+ nl_msg_put_u32(buf, OVS_KEY_ATTR_DP_HASH, md->dp_hash);
+ }
+
if (flow_tnl_dst_is_set(&md->tunnel)) {
- tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL);
+ tun_key_to_attr(buf, &md->tunnel, &md->tunnel, NULL, NULL);
}
nl_msg_put_u32(buf, OVS_KEY_ATTR_SKB_MARK, md->pkt_mark);
ovs_be32 packet_type = htonl(PT_UNKNOWN);
ovs_be16 ethertype = 0;
size_t left;
- uint32_t wanted_attrs = 1u << OVS_KEY_ATTR_PRIORITY |
- 1u << OVS_KEY_ATTR_SKB_MARK | 1u << OVS_KEY_ATTR_TUNNEL |
- 1u << OVS_KEY_ATTR_IN_PORT | 1u << OVS_KEY_ATTR_ETHERTYPE |
- 1u << OVS_KEY_ATTR_ETHERNET;
pkt_metadata_init(md, ODPP_NONE);
NL_ATTR_FOR_EACH (nla, left, key, key_len) {
- uint16_t type = nl_attr_type(nla);
+ enum ovs_key_attr type = nl_attr_type(nla);
size_t len = nl_attr_get_size(nla);
int expected_len = odp_key_attr_len(ovs_flow_key_attr_lens,
OVS_KEY_ATTR_MAX, type);
switch (type) {
case OVS_KEY_ATTR_RECIRC_ID:
md->recirc_id = nl_attr_get_u32(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_RECIRC_ID);
break;
case OVS_KEY_ATTR_DP_HASH:
md->dp_hash = nl_attr_get_u32(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_DP_HASH);
break;
case OVS_KEY_ATTR_PRIORITY:
md->skb_priority = nl_attr_get_u32(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_PRIORITY);
break;
case OVS_KEY_ATTR_SKB_MARK:
md->pkt_mark = nl_attr_get_u32(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_SKB_MARK);
break;
case OVS_KEY_ATTR_CT_STATE:
md->ct_state = odp_to_ovs_ct_state(nl_attr_get_u32(nla));
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_CT_STATE);
break;
case OVS_KEY_ATTR_CT_ZONE:
md->ct_zone = nl_attr_get_u16(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_CT_ZONE);
break;
case OVS_KEY_ATTR_CT_MARK:
md->ct_mark = nl_attr_get_u32(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_CT_MARK);
break;
case OVS_KEY_ATTR_CT_LABELS: {
md->ct_label = nl_attr_get_u128(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_CT_LABELS);
break;
}
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: {
const struct ovs_key_ct_tuple_ipv4 *ct = nl_attr_get(nla);
md->ct_orig_tuple.ipv4 = *ct;
md->ct_orig_tuple_ipv6 = false;
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
break;
}
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: {
md->ct_orig_tuple.ipv6 = *ct;
md->ct_orig_tuple_ipv6 = true;
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
break;
}
case OVS_KEY_ATTR_TUNNEL: {
enum odp_key_fitness res;
- res = odp_tun_key_from_attr(nla, &md->tunnel);
+ res = odp_tun_key_from_attr(nla, &md->tunnel, NULL);
if (res == ODP_FIT_ERROR) {
memset(&md->tunnel, 0, sizeof md->tunnel);
- } else if (res == ODP_FIT_PERFECT) {
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_TUNNEL);
}
break;
}
case OVS_KEY_ATTR_IN_PORT:
md->in_port.odp_port = nl_attr_get_odp_port(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_IN_PORT);
break;
case OVS_KEY_ATTR_ETHERNET:
/* Presence of OVS_KEY_ATTR_ETHERNET indicates Ethernet packet. */
packet_type = htonl(PT_ETH);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_ETHERNET);
break;
case OVS_KEY_ATTR_ETHERTYPE:
ethertype = nl_attr_get_be16(nla);
- wanted_attrs &= ~(1u << OVS_KEY_ATTR_ETHERTYPE);
break;
+ case OVS_KEY_ATTR_UNSPEC:
+ case OVS_KEY_ATTR_ENCAP:
+ case OVS_KEY_ATTR_VLAN:
+ case OVS_KEY_ATTR_IPV4:
+ case OVS_KEY_ATTR_IPV6:
+ case OVS_KEY_ATTR_TCP:
+ case OVS_KEY_ATTR_UDP:
+ case OVS_KEY_ATTR_ICMP:
+ case OVS_KEY_ATTR_ICMPV6:
+ case OVS_KEY_ATTR_ARP:
+ case OVS_KEY_ATTR_ND:
+ case OVS_KEY_ATTR_ND_EXTENSIONS:
+ case OVS_KEY_ATTR_SCTP:
+ case OVS_KEY_ATTR_TCP_FLAGS:
+ case OVS_KEY_ATTR_MPLS:
+ case OVS_KEY_ATTR_PACKET_TYPE:
+ case OVS_KEY_ATTR_NSH:
+ case __OVS_KEY_ATTR_MAX:
default:
break;
}
-
- if (!wanted_attrs) {
- break; /* Have everything. */
- }
}
if (packet_type == htonl(PT_ETH)) {
}
}
-uint32_t
-odp_flow_key_hash(const struct nlattr *key, size_t key_len)
+/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'.
+ * Generated value has format of random UUID. */
+void
+odp_flow_key_hash(const void *key, size_t key_len, ovs_u128 *hash)
{
- BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
- return hash_bytes32(ALIGNED_CAST(const uint32_t *, key), key_len, 0);
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+ static uint32_t secret;
+
+ if (ovsthread_once_start(&once)) {
+ secret = random_uint32();
+ ovsthread_once_done(&once);
+ }
+ hash_bytes128(key, key_len, secret, hash);
+ uuid_set_bits_v4((struct uuid *)hash);
}
static void
: FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
}
+/* Parses the attributes in the 'key_len' bytes of 'key' into 'attrs', which
+ * must have OVS_KEY_ATTR_MAX + 1 elements. Stores each attribute in 'key'
+ * into the corresponding element of 'attrs'.
+ *
+ * Stores a bitmask of the attributes' indexes found in 'key' into
+ * '*present_attrsp'.
+ *
+ * If an attribute beyond OVS_KEY_ATTR_MAX is found, stores its attribute type
+ * (or one of them, if more than one) into '*out_of_range_attrp', otherwise 0.
+ *
+ * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
+ * error message in '*errorp'. */
static bool
parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
const struct nlattr *attrs[], uint64_t *present_attrsp,
- int *out_of_range_attrp)
+ int *out_of_range_attrp, char **errorp)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
const struct nlattr *nla;
if (len != expected_len && expected_len >= 0) {
char namebuf[OVS_KEY_ATTR_BUFSIZE];
- VLOG_ERR_RL(&rl, "attribute %s has length %"PRIuSIZE" but should have "
- "length %d", ovs_key_attr_to_string(type, namebuf,
- sizeof namebuf),
- len, expected_len);
+ odp_parse_error(&rl, errorp, "attribute %s has length %"PRIuSIZE" "
+ "but should have length %d",
+ ovs_key_attr_to_string(type, namebuf,
+ sizeof namebuf),
+ len, expected_len);
return false;
}
if (present_attrs & (UINT64_C(1) << type)) {
char namebuf[OVS_KEY_ATTR_BUFSIZE];
- VLOG_ERR_RL(&rl, "duplicate %s attribute in flow key",
- ovs_key_attr_to_string(type,
- namebuf, sizeof namebuf));
+ odp_parse_error(&rl, errorp,
+ "duplicate %s attribute in flow key",
+ ovs_key_attr_to_string(type, namebuf,
+ sizeof namebuf));
return false;
}
}
}
if (left) {
- VLOG_ERR_RL(&rl, "trailing garbage in flow key");
+ odp_parse_error(&rl, errorp, "trailing garbage in flow key");
return false;
}
return ODP_FIT_PERFECT;
}
+/* Initializes 'flow->dl_type' based on the attributes in 'attrs', in which the
+ * attributes in the bit-mask 'present_attrs' are present. Returns true if
+ * successful, false on failure.
+ *
+ * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
+ * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
+ * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
+ * previously parsed flow key.
+ *
+ * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
+ * error message in '*errorp'. */
static bool
parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
uint64_t present_attrs, uint64_t *expected_attrs,
- struct flow *flow, const struct flow *src_flow)
+ struct flow *flow, const struct flow *src_flow,
+ char **errorp)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
bool is_mask = flow != src_flow;
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
- VLOG_ERR_RL(&rl, "invalid Ethertype %"PRIu16" in flow key",
- ntohs(flow->dl_type));
+ odp_parse_error(&rl, errorp,
+ "invalid Ethertype %"PRIu16" in flow key",
+ ntohs(flow->dl_type));
return false;
}
if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
flow->dl_type != htons(0xffff)) {
+ odp_parse_error(&rl, errorp, "can't bitwise match non-Ethernet II "
+ "\"Ethertype\" %#"PRIx16" (with mask %#"PRIx16")",
+ ntohs(src_flow->dl_type), ntohs(flow->dl_type));
return false;
}
*expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
flow->dl_type = htons(0xffff);
} else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
/* See comments in odp_flow_key_from_flow__(). */
- VLOG_ERR_RL(&rl, "mask expected for non-Ethernet II frame");
+ odp_parse_error(&rl, errorp,
+ "mask expected for non-Ethernet II frame");
return false;
}
}
return true;
}
+/* Initializes MPLS, L3, and L4 fields in 'flow' based on the attributes in
+ * 'attrs', in which the attributes in the bit-mask 'present_attrs' are
+ * present. The caller also indicates an out-of-range attribute
+ * 'out_of_range_attr' if one was present when parsing (if so, the fitness
+ * cannot be perfect).
+ *
+ * Sets 1-bits in '*expected_attrs' for the attributes in 'attrs' that were
+ * consulted. 'flow' is assumed to be a flow key unless 'src_flow' is nonnull,
+ * in which case 'flow' is a flow mask and 'src_flow' is its corresponding
+ * previously parsed flow key.
+ *
+ * Returns fitness based on any discrepancies between present and expected
+ * attributes, except that a 'need_check' of false overrides this.
+ *
+ * If 'errorp' is nonnull and the function returns false, stores a malloc()'d
+ * error message in '*errorp'. 'key' and 'key_len' are just used for error
+ * reporting in this case. */
static enum odp_key_fitness
parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
uint64_t present_attrs, int out_of_range_attr,
- uint64_t expected_attrs, struct flow *flow,
+ uint64_t *expected_attrs, struct flow *flow,
const struct nlattr *key, size_t key_len,
- const struct flow *src_flow)
+ const struct flow *src_flow, bool need_check, char **errorp)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
bool is_mask = src_flow != flow;
if (eth_type_mpls(src_flow->dl_type)) {
if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
- expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
+ *expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
int i;
if (!size || size % sizeof(ovs_be32)) {
+ odp_parse_error(&rl, errorp,
+ "MPLS LSEs have invalid length %"PRIuSIZE,
+ size);
return ODP_FIT_ERROR;
}
if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
+ odp_parse_error(&rl, errorp,
+ "unexpected MPLS Ethertype mask %x"PRIx16,
+ ntohs(flow->dl_type));
return ODP_FIT_ERROR;
}
/* BOS may be set only in the innermost label. */
for (i = 0; i < n - 1; i++) {
if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
+ odp_parse_error(&rl, errorp,
+ "MPLS BOS set in non-innermost label");
return ODP_FIT_ERROR;
}
}
goto done;
} else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
const struct ovs_key_ipv4 *ipv4_key;
ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
put_ipv4_key(ipv4_key, flow, is_mask);
if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
+ odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV4 has invalid "
+ "nw_frag %#"PRIx8, flow->nw_frag);
return ODP_FIT_ERROR;
}
+
if (is_mask) {
check_start = ipv4_key;
check_len = sizeof *ipv4_key;
}
} else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
const struct ovs_key_ipv6 *ipv6_key;
ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
put_ipv6_key(ipv6_key, flow, is_mask);
if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
+ odp_parse_error(&rl, errorp, "OVS_KEY_ATTR_IPV6 has invalid "
+ "nw_frag %#"PRIx8, flow->nw_frag);
return ODP_FIT_ERROR;
}
if (is_mask) {
} else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
src_flow->dl_type == htons(ETH_TYPE_RARP)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
const struct ovs_key_arp *arp_key;
arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
- VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
- "key", ntohs(arp_key->arp_op));
+ odp_parse_error(&rl, errorp,
+ "unsupported ARP opcode %"PRIu16" in flow "
+ "key", ntohs(arp_key->arp_op));
return ODP_FIT_ERROR;
}
put_arp_key(arp_key, flow);
expected_bit = OVS_KEY_ATTR_ARP;
}
}
+ } else if (src_flow->dl_type == htons(ETH_TYPE_NSH)) {
+ if (!is_mask) {
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_NSH;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_NSH)) {
+ if (odp_nsh_key_from_attr__(attrs[OVS_KEY_ATTR_NSH],
+ is_mask, &flow->nsh,
+ NULL, errorp) == ODP_FIT_ERROR) {
+ return ODP_FIT_ERROR;
+ }
+ if (is_mask) {
+ check_start = nl_attr_get(attrs[OVS_KEY_ATTR_NSH]);
+ check_len = nl_attr_get_size(attrs[OVS_KEY_ATTR_NSH]);
+ expected_bit = OVS_KEY_ATTR_NSH;
+ }
+ }
} else {
goto done;
}
if (check_len > 0) { /* Happens only when 'is_mask'. */
if (!is_all_zeros(check_start, check_len) &&
flow->dl_type != htons(0xffff)) {
+ odp_parse_error(&rl, errorp, "unexpected L3 matching with "
+ "masked Ethertype %#"PRIx16"/%#"PRIx16,
+ ntohs(src_flow->dl_type),
+ ntohs(flow->dl_type));
return ODP_FIT_ERROR;
} else {
- expected_attrs |= UINT64_C(1) << expected_bit;
+ *expected_attrs |= UINT64_C(1) << expected_bit;
}
}
src_flow->dl_type == htons(ETH_TYPE_IPV6))
&& !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
const union ovs_key_tp *tcp_key;
expected_bit = OVS_KEY_ATTR_TCP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
}
} else if (src_flow->nw_proto == IPPROTO_UDP
src_flow->dl_type == htons(ETH_TYPE_IPV6))
&& !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
const union ovs_key_tp *udp_key;
src_flow->dl_type == htons(ETH_TYPE_IPV6))
&& !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
const union ovs_key_tp *sctp_key;
&& src_flow->dl_type == htons(ETH_TYPE_IP)
&& !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
const struct ovs_key_icmp *icmp_key;
&& src_flow->dl_type == htons(ETH_TYPE_IPV6)
&& !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
const struct ovs_key_icmpv6 *icmpv6_key;
expected_bit = OVS_KEY_ATTR_ICMPV6;
if (is_nd(src_flow, NULL)) {
if (!is_mask) {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
const struct ovs_key_nd *nd_key;
* htons(0xffff). See xlate_wc_finish() for details.
* */
if (!is_all_zeros(nd_key, sizeof *nd_key) &&
+ (flow->tp_src != htons(0xff) ||
+ flow->tp_dst != htons(0xff))) {
+ odp_parse_error(&rl, errorp,
+ "ICMP (src,dst) masks should be "
+ "(0xff,0xff) but are actually "
+ "(%#"PRIx16",%#"PRIx16")",
+ ntohs(flow->tp_src),
+ ntohs(flow->tp_dst));
+ return ODP_FIT_ERROR;
+ } else {
+ *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
+ }
+ }
+ }
+ if (present_attrs &
+ (UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS)) {
+ const struct ovs_key_nd_extensions *nd_ext_key;
+ if (!is_mask) {
+ *expected_attrs |=
+ UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
+ }
+
+ nd_ext_key =
+ nl_attr_get(attrs[OVS_KEY_ATTR_ND_EXTENSIONS]);
+ flow->igmp_group_ip4 = nd_ext_key->nd_reserved;
+ flow->tcp_flags = htons(nd_ext_key->nd_options_type);
+
+ if (is_mask) {
+ /* Even though 'tp_src' and 'tp_dst' are 16 bits wide,
+ * ICMP type and code are 8 bits wide. Therefore, an
+ * exact match looks like htons(0xff), not
+ * htons(0xffff). See xlate_wc_finish() for details.
+ * */
+ if (!is_all_zeros(nd_ext_key, sizeof *nd_ext_key) &&
(flow->tp_src != htons(0xff) ||
flow->tp_dst != htons(0xff))) {
return ODP_FIT_ERROR;
} else {
- expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
+ *expected_attrs |=
+ UINT64_C(1) << OVS_KEY_ATTR_ND_EXTENSIONS;
}
}
}
}
}
+ } else if (src_flow->nw_proto == IPPROTO_IGMP
+ && src_flow->dl_type == htons(ETH_TYPE_IP)) {
+ /* OVS userspace parses the IGMP type, code, and group, but its
+ * datapaths do not, so there is always missing information. */
+ return ODP_FIT_TOO_LITTLE;
}
if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
+ odp_parse_error(&rl, errorp, "flow matches on L4 ports but does "
+ "not define an L4 protocol");
return ODP_FIT_ERROR;
} else {
- expected_attrs |= UINT64_C(1) << expected_bit;
+ *expected_attrs |= UINT64_C(1) << expected_bit;
}
}
done:
- return check_expectations(present_attrs, out_of_range_attr, expected_attrs,
- key, key_len);
+ return need_check ? check_expectations(present_attrs, out_of_range_attr,
+ *expected_attrs, key, key_len) : ODP_FIT_PERFECT;
}
/* Parse 802.1Q header then encapsulated L3 attributes. */
uint64_t present_attrs, int out_of_range_attr,
uint64_t expected_attrs, struct flow *flow,
const struct nlattr *key, size_t key_len,
- const struct flow *src_flow)
+ const struct flow *src_flow, char **errorp)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
bool is_mask = src_flow != flow;
}
return fitness;
} else if (!(flow->vlans[encaps].tci & htons(VLAN_CFI))) {
- VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
- "but CFI bit is not set",
- ntohs(flow->vlans[encaps].tci));
+ odp_parse_error(
+ &rl, errorp, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
+ "but CFI bit is not set", ntohs(flow->vlans[encaps].tci));
return ODP_FIT_ERROR;
}
} else {
/* Now parse the encapsulated attributes. */
if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
- attrs, &present_attrs, &out_of_range_attr)) {
+ attrs, &present_attrs, &out_of_range_attr,
+ errorp)) {
return ODP_FIT_ERROR;
}
expected_attrs = 0;
if (!parse_ethertype(attrs, present_attrs, &expected_attrs,
- flow, src_flow)) {
+ flow, src_flow, errorp)) {
return ODP_FIT_ERROR;
}
-
+ encap_fitness = parse_l2_5_onward(attrs, present_attrs,
+ out_of_range_attr,
+ &expected_attrs,
+ flow, key, key_len,
+ src_flow, false, errorp);
+ if (encap_fitness != ODP_FIT_PERFECT) {
+ return encap_fitness;
+ }
encaps++;
}
- encap_fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
- expected_attrs, flow, key, key_len,
- src_flow);
-
- /* The overall fitness is the worse of the outer and inner attributes. */
- return MAX(fitness, encap_fitness);
+ return check_expectations(present_attrs, out_of_range_attr,
+ expected_attrs, key, key_len);
}
static enum odp_key_fitness
odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
- struct flow *flow, const struct flow *src_flow)
+ struct flow *flow, const struct flow *src_flow,
+ char **errorp)
{
+ /* New "struct flow" fields that are visible to the datapath (including all
+ * data fields) should be translated from equivalent datapath flow fields
+ * here (you will have to add a OVS_KEY_ATTR_* for them). */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
+
+ enum odp_key_fitness fitness = ODP_FIT_ERROR;
+ if (errorp) {
+ *errorp = NULL;
+ }
+
const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
uint64_t expected_attrs;
uint64_t present_attrs;
/* Parse attributes. */
if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
- &out_of_range_attr)) {
- return ODP_FIT_ERROR;
+ &out_of_range_attr, errorp)) {
+ goto exit;
}
expected_attrs = 0;
enum odp_key_fitness res;
res = odp_tun_key_from_attr__(attrs[OVS_KEY_ATTR_TUNNEL], is_mask,
- &flow->tunnel);
+ &flow->tunnel, errorp);
if (res == ODP_FIT_ERROR) {
- return ODP_FIT_ERROR;
+ goto exit;
} else if (res == ODP_FIT_PERFECT) {
expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
}
flow->packet_type
= nl_attr_get_be32(attrs[OVS_KEY_ATTR_PACKET_TYPE]);
expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PACKET_TYPE;
+ if (pt_ns(src_flow->packet_type) == OFPHTN_ETHERTYPE) {
+ flow->dl_type = pt_ns_type_be(flow->packet_type);
+ }
} else if (!is_mask) {
flow->packet_type = htonl(PT_ETH);
}
/* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
- src_flow)) {
- return ODP_FIT_ERROR;
+ src_flow, errorp)) {
+ goto exit;
}
if (is_mask
? (src_flow->vlans[0].tci & htons(VLAN_CFI)) != 0
: eth_type_vlan(src_flow->dl_type)) {
- return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
- expected_attrs, flow, key, key_len, src_flow);
+ fitness = parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
+ expected_attrs, flow, key, key_len,
+ src_flow, errorp);
+ } else {
+ if (is_mask) {
+ /* A missing VLAN mask means exact match on vlan_tci 0 (== no
+ * VLAN). */
+ flow->vlans[0].tpid = htons(0xffff);
+ flow->vlans[0].tci = htons(0xffff);
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
+ flow->vlans[0].tci = nl_attr_get_be16(
+ attrs[OVS_KEY_ATTR_VLAN]);
+ expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
+ }
+ }
+ fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
+ &expected_attrs, flow, key, key_len,
+ src_flow, true, errorp);
}
- if (is_mask) {
- /* A missing VLAN mask means exact match on vlan_tci 0 (== no VLAN). */
- flow->vlans[0].tpid = htons(0xffff);
- flow->vlans[0].tci = htons(0xffff);
- if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
- flow->vlans[0].tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
- expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
+
+exit:;
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ if (fitness == ODP_FIT_ERROR && (errorp || !VLOG_DROP_WARN(&rl))) {
+ struct ds s = DS_EMPTY_INITIALIZER;
+ if (is_mask) {
+ ds_put_cstr(&s, "the flow mask in error is: ");
+ odp_flow_key_format(key, key_len, &s);
+ ds_put_cstr(&s, ", for the following flow key: ");
+ flow_format(&s, src_flow, NULL);
+ } else {
+ ds_put_cstr(&s, "the flow key in error is: ");
+ odp_flow_key_format(key, key_len, &s);
+ }
+ if (errorp) {
+ char *old_error = *errorp;
+ *errorp = xasprintf("%s; %s", old_error, ds_cstr(&s));
+ free(old_error);
+ } else {
+ VLOG_WARN("%s", ds_cstr(&s));
}
+ ds_destroy(&s);
}
- return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
- expected_attrs, flow, key, key_len, src_flow);
+ return fitness;
}
/* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
* by looking at the attributes for lower-level protocols, e.g. if the network
* protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
* know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
- * must be absent. */
+ * must be absent.
+ *
+ * If 'errorp' is nonnull, this function uses it for detailed error reports: if
+ * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
+ * '*errorp', otherwise NULL. */
enum odp_key_fitness
odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
- struct flow *flow)
+ struct flow *flow, char **errorp)
{
- return odp_flow_key_to_flow__(key, key_len, flow, flow);
+ return odp_flow_key_to_flow__(key, key_len, flow, flow, errorp);
}
/* Converts the 'mask_key_len' bytes of OVS_KEY_ATTR_* attributes in 'mask_key'
* to a mask structure in 'mask'. 'flow' must be a previously translated flow
* corresponding to 'mask' and similarly flow_key/flow_key_len must be the
* attributes from that flow. Returns an ODP_FIT_* value that indicates how
- * well 'key' fits our expectations for what a flow key should contain. */
+ * well 'key' fits our expectations for what a flow key should contain.
+ *
+ * If 'errorp' is nonnull, this function uses it for detailed error reports: if
+ * the return value is ODP_FIT_ERROR, it stores a malloc()'d error string in
+ * '*errorp', otherwise NULL. */
enum odp_key_fitness
odp_flow_key_to_mask(const struct nlattr *mask_key, size_t mask_key_len,
- struct flow_wildcards *mask, const struct flow *src_flow)
+ struct flow_wildcards *mask, const struct flow *src_flow,
+ char **errorp)
{
if (mask_key_len) {
return odp_flow_key_to_flow__(mask_key, mask_key_len,
- &mask->masks, src_flow);
-
+ &mask->masks, src_flow, errorp);
} else {
+ if (errorp) {
+ *errorp = NULL;
+ }
+
/* A missing mask means that the flow should be exact matched.
* Generate an appropriate exact wildcard for the flow. */
flow_wildcards_init_for_packet(mask, src_flow);
{
enum odp_key_fitness fitness;
- fitness = odp_flow_key_to_flow(key, key_len, &match->flow);
+ fitness = odp_flow_key_to_flow(key, key_len, &match->flow, NULL);
if (fitness) {
/* This should not happen: it indicates that
* odp_flow_key_from_flow() and odp_flow_key_to_flow() disagree on
return EINVAL;
}
- fitness = odp_flow_key_to_mask(mask, mask_len, &match->wc, &match->flow);
+ fitness = odp_flow_key_to_mask(mask, mask_len, &match->wc, &match->flow,
+ NULL);
if (fitness) {
/* This should not happen: it indicates that
* odp_flow_key_from_mask() and odp_flow_key_to_mask()
/* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
* Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
- * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
- * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
- * null, then the return value is not meaningful.) */
-size_t
+ * whose contents are the 'userdata_size' bytes at 'userdata' and sets
+ * 'odp_actions_ofs' if nonnull with the offset within 'odp_actions' of the
+ * start of the cookie. (If 'userdata' is null, then the 'odp_actions_ofs'
+ * value is not meaningful.)
+ *
+ * Returns negative error code on failure. */
+int
odp_put_userspace_action(uint32_t pid,
const void *userdata, size_t userdata_size,
odp_port_t tunnel_out_port,
bool include_actions,
- struct ofpbuf *odp_actions)
+ struct ofpbuf *odp_actions, size_t *odp_actions_ofs)
{
size_t userdata_ofs;
size_t offset;
offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
if (userdata) {
+ if (nl_attr_oversized(userdata_size)) {
+ return -E2BIG;
+ }
userdata_ofs = odp_actions->size + NLA_HDRLEN;
/* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
if (include_actions) {
nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
}
+ if (nl_attr_oversized(odp_actions->size - offset - NLA_HDRLEN)) {
+ return -E2BIG;
+ }
nl_msg_end_nested(odp_actions, offset);
- return userdata_ofs;
+ if (odp_actions_ofs) {
+ *odp_actions_ofs = userdata_ofs;
+ }
+
+ return 0;
}
void
void
odp_put_tunnel_action(const struct flow_tnl *tunnel,
- struct ofpbuf *odp_actions)
+ struct ofpbuf *odp_actions, const char *tnl_type)
{
size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
- tun_key_to_attr(odp_actions, tunnel, tunnel, NULL);
+ tun_key_to_attr(odp_actions, tunnel, tunnel, NULL, tnl_type);
nl_msg_end_nested(odp_actions, offset);
}
* only on tunneling information. */
void
commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions)
+ struct ofpbuf *odp_actions, const char *tnl_type)
{
/* A valid IPV4_TUNNEL must have non-zero ip_dst; a valid IPv6 tunnel
* must have non-zero ipv6_dst. */
return;
}
memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
- odp_put_tunnel_action(&base->tunnel, odp_actions);
+ odp_put_tunnel_action(&base->tunnel, odp_actions, tnl_type);
+ }
+}
+
+struct offsetof_sizeof {
+ int offset;
+ int size;
+};
+
+
+/* Performs bitwise OR over the fields in 'dst_' and 'src_' specified in
+ * 'offsetof_sizeof_arr' array, which must be terminated by an entry whose
+ * size is 0. Result is stored in 'dst_'. */
+static void
+or_masks(void *dst_, const void *src_,
+         struct offsetof_sizeof *offsetof_sizeof_arr)
+{
+    int field, size, offset;
+    const uint8_t *src = src_;
+    uint8_t *dst = dst_;
+
+    for (field = 0; ; field++) {
+        size = offsetof_sizeof_arr[field].size;
+        offset = offsetof_sizeof_arr[field].offset;
+
+        if (!size) {
+            /* A zero-sized entry marks the end of the field array. */
+            return;
+        }
+        or_bytes(dst + offset, src + offset, size);
+    }
+}
+
+/* Compares each of the fields in 'key0' and 'key1'. The fields are specified
+ * in 'offsetof_sizeof_arr', which is an array terminated by a 0-size field.
+ * Returns true if all of the fields are equal, false if at least one differs.
+ * As a side effect, for each field that is the same in 'key0' and 'key1',
+ * zeros the corresponding bytes in 'mask'. */
+static bool
+keycmp_mask(const void *key0, const void *key1,
+            struct offsetof_sizeof *offsetof_sizeof_arr, void *mask)
+{
+    bool differ = false;
+
+    for (int field = 0 ; ; field++) {
+        int size = offsetof_sizeof_arr[field].size;
+        int offset = offsetof_sizeof_arr[field].offset;
+        if (size == 0) {
+            /* Zero-sized sentinel: end of the field array. */
+            break;
+        }
+
+        char *pkey0 = ((char *)key0) + offset;
+        char *pkey1 = ((char *)key1) + offset;
+        char *pmask = ((char *)mask) + offset;
+        if (memcmp(pkey0, pkey1, size) == 0) {
+            /* Field is unchanged, so it need not be set: clear its mask. */
+            memset(pmask, 0, size);
+        } else {
+            differ = true;
+        }
    }
+
+    return differ;
+}
static bool
commit(enum ovs_key_attr attr, bool use_masked_set,
const void *key, void *base, void *mask, size_t size,
+ struct offsetof_sizeof *offsetof_sizeof_arr,
struct ofpbuf *odp_actions)
{
- if (memcmp(key, base, size)) {
+ if (keycmp_mask(key, base, offsetof_sizeof_arr, mask)) {
bool fully_masked = odp_mask_is_exact(attr, mask, size);
if (use_masked_set && !fully_masked) {
}
static void
-commit_set_ether_addr_action(const struct flow *flow, struct flow *base_flow,
- struct ofpbuf *odp_actions,
- struct flow_wildcards *wc,
- bool use_masked)
+commit_set_ether_action(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions,
+ struct flow_wildcards *wc,
+ bool use_masked)
{
- struct ovs_key_ethernet key, base, mask;
+ struct ovs_key_ethernet key, base, mask, orig_mask;
+ struct offsetof_sizeof ovs_key_ethernet_offsetof_sizeof_arr[] =
+ OVS_KEY_ETHERNET_OFFSETOF_SIZEOF_ARR;
+
+ if (flow->packet_type != htonl(PT_ETH)) {
+ return;
+ }
get_ethernet_key(flow, &key);
get_ethernet_key(base_flow, &base);
get_ethernet_key(&wc->masks, &mask);
+ memcpy(&orig_mask, &mask, sizeof mask);
if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
- &key, &base, &mask, sizeof key, odp_actions)) {
+ &key, &base, &mask, sizeof key,
+ ovs_key_ethernet_offsetof_sizeof_arr, odp_actions)) {
put_ethernet_key(&base, base_flow);
+ or_masks(&mask, &orig_mask, ovs_key_ethernet_offsetof_sizeof_arr);
put_ethernet_key(&mask, &wc->masks);
}
}
-static void
-commit_ether_action(const struct flow *flow, struct flow *base_flow,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc,
- bool use_masked)
-{
- if (flow->packet_type == htonl(PT_ETH)) {
- if (base_flow->packet_type != htonl(PT_ETH)) {
- odp_put_push_eth_action(odp_actions, &flow->dl_src, &flow->dl_dst);
- base_flow->packet_type = flow->packet_type;
- base_flow->dl_src = flow->dl_src;
- base_flow->dl_dst = flow->dl_dst;
- } else {
- commit_set_ether_addr_action(flow, base_flow, odp_actions, wc,
- use_masked);
- }
- } else {
- if (base_flow->packet_type == htonl(PT_ETH)) {
- odp_put_pop_eth_action(odp_actions);
- base_flow->packet_type = flow->packet_type;
- }
- }
-}
-
static void
commit_vlan_action(const struct flow* flow, struct flow *base,
struct ofpbuf *odp_actions, struct flow_wildcards *wc)
/* Otherwise, if there more LSEs in base than are common between
* base and flow then pop the topmost one. */
ovs_be16 dl_type;
- bool popped;
-
/* If all the LSEs are to be popped and this is not the outermost
* LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
* POP_MPLS action instead of flow->dl_type.
dl_type = flow->dl_type;
}
nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
- popped = flow_pop_mpls(base, base_n, flow->dl_type, NULL);
- ovs_assert(popped);
+ ovs_assert(flow_pop_mpls(base, base_n, flow->dl_type, NULL));
base_n--;
}
}
struct ofpbuf *odp_actions, struct flow_wildcards *wc,
bool use_masked)
{
- struct ovs_key_ipv4 key, mask, base;
+ struct ovs_key_ipv4 key, mask, orig_mask, base;
+ struct offsetof_sizeof ovs_key_ipv4_offsetof_sizeof_arr[] =
+ OVS_KEY_IPV4_OFFSETOF_SIZEOF_ARR;
/* Check that nw_proto and nw_frag remain unchanged. */
ovs_assert(flow->nw_proto == base_flow->nw_proto &&
get_ipv4_key(flow, &key, false);
get_ipv4_key(base_flow, &base, false);
get_ipv4_key(&wc->masks, &mask, true);
+ memcpy(&orig_mask, &mask, sizeof mask);
mask.ipv4_proto = 0; /* Not writeable. */
mask.ipv4_frag = 0; /* Not writable. */
+ if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
+ ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
+ mask.ipv4_tos &= ~IP_ECN_MASK;
+ }
+
if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
- odp_actions)) {
+ ovs_key_ipv4_offsetof_sizeof_arr, odp_actions)) {
put_ipv4_key(&base, base_flow, false);
- if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
- put_ipv4_key(&mask, &wc->masks, true);
- }
+ or_masks(&mask, &orig_mask, ovs_key_ipv4_offsetof_sizeof_arr);
+ put_ipv4_key(&mask, &wc->masks, true);
}
}
struct ofpbuf *odp_actions, struct flow_wildcards *wc,
bool use_masked)
{
- struct ovs_key_ipv6 key, mask, base;
+ struct ovs_key_ipv6 key, mask, orig_mask, base;
+ struct offsetof_sizeof ovs_key_ipv6_offsetof_sizeof_arr[] =
+ OVS_KEY_IPV6_OFFSETOF_SIZEOF_ARR;
/* Check that nw_proto and nw_frag remain unchanged. */
ovs_assert(flow->nw_proto == base_flow->nw_proto &&
get_ipv6_key(flow, &key, false);
get_ipv6_key(base_flow, &base, false);
get_ipv6_key(&wc->masks, &mask, true);
+ memcpy(&orig_mask, &mask, sizeof mask);
mask.ipv6_proto = 0; /* Not writeable. */
mask.ipv6_frag = 0; /* Not writable. */
+ mask.ipv6_label &= htonl(IPV6_LABEL_MASK); /* Not writable. */
+
+ if (flow_tnl_dst_is_set(&base_flow->tunnel) &&
+ ((base_flow->nw_tos ^ flow->nw_tos) & IP_ECN_MASK) == 0) {
+ mask.ipv6_tclass &= ~IP_ECN_MASK;
+ }
if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
- odp_actions)) {
+ ovs_key_ipv6_offsetof_sizeof_arr, odp_actions)) {
put_ipv6_key(&base, base_flow, false);
- if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
- put_ipv6_key(&mask, &wc->masks, true);
- }
+ or_masks(&mask, &orig_mask, ovs_key_ipv6_offsetof_sizeof_arr);
+ put_ipv6_key(&mask, &wc->masks, true);
}
}
arp->arp_sip = flow->nw_src;
arp->arp_tip = flow->nw_dst;
- arp->arp_op = htons(flow->nw_proto);
+ arp->arp_op = flow->nw_proto == UINT8_MAX ?
+ OVS_BE16_MAX : htons(flow->nw_proto);
arp->arp_sha = flow->arp_sha;
arp->arp_tha = flow->arp_tha;
}
commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
- struct ovs_key_arp key, mask, base;
+ struct ovs_key_arp key, mask, orig_mask, base;
+ struct offsetof_sizeof ovs_key_arp_offsetof_sizeof_arr[] =
+ OVS_KEY_ARP_OFFSETOF_SIZEOF_ARR;
get_arp_key(flow, &key);
get_arp_key(base_flow, &base);
get_arp_key(&wc->masks, &mask);
+ memcpy(&orig_mask, &mask, sizeof mask);
if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
- odp_actions)) {
+ ovs_key_arp_offsetof_sizeof_arr, odp_actions)) {
put_arp_key(&base, base_flow);
+ or_masks(&mask, &orig_mask, ovs_key_arp_offsetof_sizeof_arr);
put_arp_key(&mask, &wc->masks);
return SLOW_ACTION;
}
commit_set_icmp_action(const struct flow *flow, struct flow *base_flow,
struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
- struct ovs_key_icmp key, mask, base;
+ struct ovs_key_icmp key, mask, orig_mask, base;
+ struct offsetof_sizeof ovs_key_icmp_offsetof_sizeof_arr[] =
+ OVS_KEY_ICMP_OFFSETOF_SIZEOF_ARR;
enum ovs_key_attr attr;
if (is_icmpv4(flow, NULL)) {
get_icmp_key(flow, &key);
get_icmp_key(base_flow, &base);
get_icmp_key(&wc->masks, &mask);
+ memcpy(&orig_mask, &mask, sizeof mask);
- if (commit(attr, false, &key, &base, &mask, sizeof key, odp_actions)) {
+ if (commit(attr, false, &key, &base, &mask, sizeof key,
+ ovs_key_icmp_offsetof_sizeof_arr, odp_actions)) {
put_icmp_key(&base, base_flow);
+ or_masks(&mask, &orig_mask, ovs_key_icmp_offsetof_sizeof_arr);
put_icmp_key(&mask, &wc->masks);
return SLOW_ACTION;
}
flow->arp_tha = nd->nd_tll;
}
+/* Copies the IPv6 ND extensions fields out of 'flow' into 'nd_ext'.
+ *
+ * The flow reuses 'igmp_group_ip4' to hold the ND reserved field and
+ * 'tcp_flags' to hold the ND option type (see the corresponding parsing
+ * of OVS_KEY_ATTR_ND_EXTENSIONS). */
+static void
+get_nd_extensions_key(const struct flow *flow,
+                      struct ovs_key_nd_extensions *nd_ext)
+{
+    /* ND Extensions key has padding, clear it. */
+    memset(nd_ext, 0, sizeof *nd_ext);
+    nd_ext->nd_reserved = flow->igmp_group_ip4;
+    nd_ext->nd_options_type = ntohs(flow->tcp_flags);
+}
+
+/* Inverse of get_nd_extensions_key(): stores the ND extensions fields from
+ * 'nd_ext' back into the reused 'flow' fields. */
+static void
+put_nd_extensions_key(const struct ovs_key_nd_extensions *nd_ext,
+                      struct flow *flow)
+{
+    flow->igmp_group_ip4 = nd_ext->nd_reserved;
+    flow->tcp_flags = htons(nd_ext->nd_options_type);
+}
+
static enum slow_path_reason
commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
struct ofpbuf *odp_actions,
struct flow_wildcards *wc, bool use_masked)
{
- struct ovs_key_nd key, mask, base;
+ struct ovs_key_nd key, mask, orig_mask, base;
+ struct offsetof_sizeof ovs_key_nd_offsetof_sizeof_arr[] =
+ OVS_KEY_ND_OFFSETOF_SIZEOF_ARR;
get_nd_key(flow, &key);
get_nd_key(base_flow, &base);
get_nd_key(&wc->masks, &mask);
+ memcpy(&orig_mask, &mask, sizeof mask);
if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
- odp_actions)) {
+ ovs_key_nd_offsetof_sizeof_arr, odp_actions)) {
put_nd_key(&base, base_flow);
+ or_masks(&mask, &orig_mask, ovs_key_nd_offsetof_sizeof_arr);
put_nd_key(&mask, &wc->masks);
return SLOW_ACTION;
}
return 0;
}
+/* Commits any change to the IPv6 ND extensions fields between 'flow' and
+ * 'base_flow' into 'odp_actions', updating 'base_flow' and the masks in
+ * 'wc' on commit.  Returns SLOW_ACTION when a change was committed (the
+ * modification must then be handled in the userspace slow path), else 0. */
+static enum slow_path_reason
+commit_set_nd_extensions_action(const struct flow *flow,
+                                struct flow *base_flow,
+                                struct ofpbuf *odp_actions,
+                                struct flow_wildcards *wc, bool use_masked)
+{
+    struct ovs_key_nd_extensions key, mask, orig_mask, base;
+    struct offsetof_sizeof ovs_key_nd_extensions_offsetof_sizeof_arr[] =
+        OVS_KEY_ND_EXTENSIONS_OFFSETOF_SIZEOF_ARR;
+
+    get_nd_extensions_key(flow, &key);
+    get_nd_extensions_key(base_flow, &base);
+    get_nd_extensions_key(&wc->masks, &mask);
+    /* Keep the pre-commit mask so cleared bits can be restored below. */
+    memcpy(&orig_mask, &mask, sizeof mask);
+
+    if (commit(OVS_KEY_ATTR_ND_EXTENSIONS, use_masked, &key, &base, &mask,
+               sizeof key, ovs_key_nd_extensions_offsetof_sizeof_arr,
+               odp_actions)) {
+        put_nd_extensions_key(&base, base_flow);
+        or_masks(&mask, &orig_mask, ovs_key_nd_extensions_offsetof_sizeof_arr);
+        put_nd_extensions_key(&mask, &wc->masks);
+        return SLOW_ACTION;
+    }
+    return 0;
+}
+
static enum slow_path_reason
commit_set_nw_action(const struct flow *flow, struct flow *base,
struct ofpbuf *odp_actions, struct flow_wildcards *wc,
bool use_masked)
{
+ uint32_t reason;
+
/* Check if 'flow' really has an L3 header. */
if (!flow->nw_proto) {
return 0;
case ETH_TYPE_IPV6:
commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
- return commit_set_nd_action(flow, base, odp_actions, wc, use_masked);
+ if (base->nw_proto == IPPROTO_ICMPV6) {
+            /* Commit extended attrs first to make sure
+             * correct options are added. */
+ reason = commit_set_nd_extensions_action(flow, base,
+ odp_actions, wc, use_masked);
+ reason |= commit_set_nd_action(flow, base, odp_actions,
+ wc, use_masked);
+ return reason;
+ }
+ break;
case ETH_TYPE_ARP:
return commit_set_arp_action(flow, base, odp_actions, wc);
return 0;
}
+/* Copies the NSH header fields from 'flow' into 'nsh'.  For a value (not a
+ * mask), the fixed-size context headers are only meaningful for metadata
+ * type 1, so they are zeroed for any other 'mdtype'. */
+static inline void
+get_nsh_key(const struct flow *flow, struct ovs_key_nsh *nsh, bool is_mask)
+{
+    *nsh = flow->nsh;
+    if (!is_mask) {
+        if (nsh->mdtype != NSH_M_TYPE1) {
+            memset(nsh->context, 0, sizeof(nsh->context));
+        }
+    }
+}
+
+/* Stores the NSH header fields from 'nsh' into 'flow', zeroing the context
+ * headers unless the metadata type is NSH_M_TYPE1 (the only type for which
+ * they carry data). */
+static inline void
+put_nsh_key(const struct ovs_key_nsh *nsh, struct flow *flow,
+            bool is_mask OVS_UNUSED)
+{
+    flow->nsh = *nsh;
+    if (flow->nsh.mdtype != NSH_M_TYPE1) {
+        memset(flow->nsh.context, 0, sizeof(flow->nsh.context));
+    }
+}
+
+/* Appends an action to 'odp_actions' that commits the difference between
+ * 'key' and 'base' for the NSH header, if any.
+ *
+ * When 'use_masked_set' is true and the change is not fully masked, an
+ * OVS_ACTION_ATTR_SET_MASKED action is emitted in which each nested
+ * attribute carries the value (pre-ANDed with the mask) immediately
+ * followed by the mask itself; otherwise a plain OVS_ACTION_ATTR_SET of
+ * the whole NSH header is emitted (and 'mask' is widened to exact-match).
+ *
+ * Updates 'base' to 'key' and returns true if an action was appended,
+ * false if 'key' and 'base' were already equal. */
+static bool
+commit_nsh(const struct ovs_key_nsh * flow_nsh, bool use_masked_set,
+           const struct ovs_key_nsh *key, struct ovs_key_nsh *base,
+           struct ovs_key_nsh *mask, size_t size,
+           struct ofpbuf *odp_actions)
+{
+    enum ovs_key_attr attr = OVS_KEY_ATTR_NSH;
+
+    if (memcmp(key, base, size) == 0) {
+        /* Mask bits are set when we have either read or set the corresponding
+         * values. Masked bits will be exact-matched, no need to set them
+         * if the value did not actually change. */
+        return false;
+    }
+
+    bool fully_masked = odp_mask_is_exact(attr, mask, size);
+
+    if (use_masked_set && !fully_masked) {
+        size_t nsh_key_ofs;
+        struct ovs_nsh_key_base nsh_base;
+        struct ovs_nsh_key_base nsh_base_mask;
+        struct ovs_nsh_key_md1 md1;
+        struct ovs_nsh_key_md1 md1_mask;
+        size_t offset = nl_msg_start_nested(odp_actions,
+                                            OVS_ACTION_ATTR_SET_MASKED);
+
+        nsh_base.flags = key->flags;
+        nsh_base.ttl = key->ttl;
+        nsh_base.mdtype = key->mdtype;
+        nsh_base.np = key->np;
+        nsh_base.path_hdr = key->path_hdr;
+
+        nsh_base_mask.flags = mask->flags;
+        nsh_base_mask.ttl = mask->ttl;
+        nsh_base_mask.mdtype = mask->mdtype;
+        nsh_base_mask.np = mask->np;
+        nsh_base_mask.path_hdr = mask->path_hdr;
+
+        /* OVS_KEY_ATTR_NSH keys */
+        nsh_key_ofs = nl_msg_start_nested(odp_actions, OVS_KEY_ATTR_NSH);
+
+        /* put value and mask for OVS_NSH_KEY_ATTR_BASE */
+        char *data = nl_msg_put_unspec_uninit(odp_actions,
+                                              OVS_NSH_KEY_ATTR_BASE,
+                                              2 * sizeof(nsh_base));
+        const char *lkey = (char *)&nsh_base, *lmask = (char *)&nsh_base_mask;
+        size_t lkey_size = sizeof(nsh_base);
+
+        /* First half: value with unmasked bits zeroed.  Second half: the
+         * mask itself. */
+        while (lkey_size--) {
+            *data++ = *lkey++ & *lmask++;
+        }
+        lmask = (char *)&nsh_base_mask;
+        memcpy(data, lmask, sizeof(nsh_base_mask));
+
+        switch (key->mdtype) {
+        case NSH_M_TYPE1:
+            memcpy(md1.context, key->context, sizeof key->context);
+            memcpy(md1_mask.context, mask->context, sizeof mask->context);
+
+            /* put value and mask for OVS_NSH_KEY_ATTR_MD1 */
+            data = nl_msg_put_unspec_uninit(odp_actions,
+                                            OVS_NSH_KEY_ATTR_MD1,
+                                            2 * sizeof(md1));
+            lkey = (char *)&md1;
+            lmask = (char *)&md1_mask;
+            lkey_size = sizeof(md1);
+
+            /* Same value-then-mask layout as for OVS_NSH_KEY_ATTR_BASE. */
+            while (lkey_size--) {
+                *data++ = *lkey++ & *lmask++;
+            }
+            lmask = (char *)&md1_mask;
+            memcpy(data, lmask, sizeof(md1_mask));
+            break;
+        case NSH_M_TYPE2:
+        default:
+            /* No match support for other MD formats yet. */
+            break;
+        }
+
+        nl_msg_end_nested(odp_actions, nsh_key_ofs);
+
+        nl_msg_end_nested(odp_actions, offset);
+    } else {
+        if (!fully_masked) {
+            /* Plain set replaces the whole header: widen the mask to exact
+             * match so the caller propagates that into the wildcards. */
+            memset(mask, 0xff, size);
+        }
+        size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
+        nsh_key_to_attr(odp_actions, flow_nsh, NULL, 0, false);
+        nl_msg_end_nested(odp_actions, offset);
+    }
+    memcpy(base, key, size);
+    return true;
+}
+
+/* Commits any change to the writable NSH header fields between 'flow' and
+ * 'base_flow' into 'odp_actions', updating 'base_flow' and the masks in
+ * 'wc' on commit.  Only applies to NSH packets; 'mdtype' and 'np' are not
+ * writable and must be identical in 'flow' and 'base_flow'. */
+static void
+commit_set_nsh_action(const struct flow *flow, struct flow *base_flow,
+                      struct ofpbuf *odp_actions,
+                      struct flow_wildcards *wc,
+                      bool use_masked)
+{
+    struct ovs_key_nsh key, mask, base;
+
+    if (flow->dl_type != htons(ETH_TYPE_NSH) ||
+        !memcmp(&base_flow->nsh, &flow->nsh, sizeof base_flow->nsh)) {
+        return;
+    }
+
+    /* Check that mdtype and np remain unchanged. */
+    ovs_assert(flow->nsh.mdtype == base_flow->nsh.mdtype &&
+               flow->nsh.np == base_flow->nsh.np);
+
+    get_nsh_key(flow, &key, false);
+    get_nsh_key(base_flow, &base, false);
+    get_nsh_key(&wc->masks, &mask, true);
+    mask.mdtype = 0;     /* Not writable. */
+    mask.np = 0;         /* Not writable. */
+
+    if (commit_nsh(&base_flow->nsh, use_masked, &key, &base, &mask,
+            sizeof key, odp_actions)) {
+        put_nsh_key(&base, base_flow, false);
+        if (mask.mdtype != 0) { /* Mask was changed by commit_nsh(). */
+            put_nsh_key(&mask, &wc->masks, true);
+        }
+    }
+}
+
/* TCP, UDP, and SCTP keys have the same layout. */
BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
bool use_masked)
{
enum ovs_key_attr key_type;
- union ovs_key_tp key, mask, base;
+ union ovs_key_tp key, mask, orig_mask, base;
+ struct offsetof_sizeof ovs_key_tp_offsetof_sizeof_arr[] =
+ OVS_KEY_TCP_OFFSETOF_SIZEOF_ARR;
/* Check if 'flow' really has an L3 header. */
if (!flow->nw_proto) {
get_tp_key(flow, &key);
get_tp_key(base_flow, &base);
get_tp_key(&wc->masks, &mask);
+ memcpy(&orig_mask, &mask, sizeof mask);
if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
- odp_actions)) {
+ ovs_key_tp_offsetof_sizeof_arr, odp_actions)) {
put_tp_key(&base, base_flow);
+ or_masks(&mask, &orig_mask, ovs_key_tp_offsetof_sizeof_arr);
put_tp_key(&mask, &wc->masks);
}
}
bool use_masked)
{
uint32_t key, mask, base;
+ struct offsetof_sizeof ovs_key_prio_offsetof_sizeof_arr[] = {
+ {0, sizeof(uint32_t)},
+ {0, 0}
+ };
key = flow->skb_priority;
base = base_flow->skb_priority;
mask = wc->masks.skb_priority;
if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
- sizeof key, odp_actions)) {
+ sizeof key, ovs_key_prio_offsetof_sizeof_arr, odp_actions)) {
base_flow->skb_priority = base;
- wc->masks.skb_priority = mask;
+ wc->masks.skb_priority |= mask;
}
}
bool use_masked)
{
uint32_t key, mask, base;
+ struct offsetof_sizeof ovs_key_pkt_mark_offsetof_sizeof_arr[] = {
+ {0, sizeof(uint32_t)},
+ {0, 0}
+ };
key = flow->pkt_mark;
base = base_flow->pkt_mark;
mask = wc->masks.pkt_mark;
if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
- sizeof key, odp_actions)) {
+ sizeof key, ovs_key_pkt_mark_offsetof_sizeof_arr,
+ odp_actions)) {
base_flow->pkt_mark = base;
- wc->masks.pkt_mark = mask;
+ wc->masks.pkt_mark |= mask;
+ }
+}
+
+/* Appends a bare OVS_ACTION_ATTR_POP_NSH flag action to 'odp_actions'. */
+static void
+odp_put_pop_nsh_action(struct ofpbuf *odp_actions)
+{
+    nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_NSH);
+}
+
+/* Appends a nested OVS_ACTION_ATTR_PUSH_NSH action to 'odp_actions' built
+ * from the NSH fields of 'flow'.  For metadata type 2, variable-length
+ * metadata is taken from 'encap_data' when nonnull; all other metadata
+ * types carry no extra metadata. */
+static void
+odp_put_push_nsh_action(struct ofpbuf *odp_actions,
+                        const struct flow *flow,
+                        struct ofpbuf *encap_data)
+{
+    uint8_t * metadata = NULL;
+    uint8_t md_size = 0;
+
+    switch (flow->nsh.mdtype) {
+    case NSH_M_TYPE2:
+        if (encap_data) {
+            /* Metadata must fit within the maximum NSH context headers. */
+            ovs_assert(encap_data->size < NSH_CTX_HDRS_MAX_LEN);
+            metadata = encap_data->data;
+            md_size = encap_data->size;
+        } else {
+            md_size = 0;
+        }
+        break;
+    default:
+        md_size = 0;
+        break;
+    }
+    size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_PUSH_NSH);
+    nsh_key_to_attr(odp_actions, &flow->nsh, metadata, md_size, false);
+    nl_msg_end_nested(odp_actions, offset);
+}
+
+/* Emits the datapath action for a pending encap ('pending_encap') or decap
+ * ('pending_decap', or an implicit packet-type change) between 'flow' and
+ * 'base_flow', updating 'base_flow' accordingly and exact-matching the
+ * packet type in 'wc'.  'encap_data' supplies variable-length metadata for
+ * an NSH encap. */
+static void
+commit_encap_decap_action(const struct flow *flow,
+                          struct flow *base_flow,
+                          struct ofpbuf *odp_actions,
+                          struct flow_wildcards *wc,
+                          bool pending_encap, bool pending_decap,
+                          struct ofpbuf *encap_data)
+{
+    if (pending_encap) {
+        switch (ntohl(flow->packet_type)) {
+        case PT_ETH: {
+            /* push_eth */
+            odp_put_push_eth_action(odp_actions, &flow->dl_src,
+                                    &flow->dl_dst);
+            base_flow->packet_type = flow->packet_type;
+            base_flow->dl_src = flow->dl_src;
+            base_flow->dl_dst = flow->dl_dst;
+            break;
+        }
+        case PT_NSH:
+            /* push_nsh */
+            odp_put_push_nsh_action(odp_actions, flow, encap_data);
+            base_flow->packet_type = flow->packet_type;
+            /* Update all packet headers in base_flow. */
+            memcpy(&base_flow->dl_dst, &flow->dl_dst,
+                   sizeof(*flow) - offsetof(struct flow, dl_dst));
+            break;
+        default:
+            /* Only the above protocols are supported for encap.
+             * The check is done at action translation. */
+            OVS_NOT_REACHED();
+        }
+    } else if (pending_decap || flow->packet_type != base_flow->packet_type) {
+        /* This is an explicit or implicit decap case. */
+        if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE &&
+            base_flow->packet_type == htonl(PT_ETH)) {
+            /* Generate pop_eth and continue without recirculation. */
+            odp_put_pop_eth_action(odp_actions);
+            base_flow->packet_type = flow->packet_type;
+            base_flow->dl_src = eth_addr_zero;
+            base_flow->dl_dst = eth_addr_zero;
+        } else {
+            /* All other decap cases require recirculation.
+             * No need to update the base flow here. */
+            switch (ntohl(base_flow->packet_type)) {
+            case PT_NSH:
+                /* pop_nsh. */
+                odp_put_pop_nsh_action(odp_actions);
+                break;
+            default:
+                /* Checks are done during translation. */
+                OVS_NOT_REACHED();
+            }
+        }
    }
+
+    /* Packet type was read (and possibly rewritten): exact-match it. */
+    wc->masks.packet_type = OVS_BE32_MAX;
+}
/* If any of the flow key data that ODP actions can modify are different in
* in addition to this function if needed. Sets fields in 'wc' that are
* used as part of the action.
*
- * Returns a reason to force processing the flow's packets into the userspace
- * slow path, if there is one, otherwise 0. */
+ * In the common case, this function returns 0. If the flow key modification
+ * requires the flow's packets to be forced into the userspace slow path, this
+ * function returns SLOW_ACTION. This only happens when there is no ODP action
+ * to modify some field that was actually modified. For example, there is no
+ * ODP action to modify any ARP field, so such a modification triggers
+ * SLOW_ACTION. (When this happens, packets that need such modification get
+ * flushed to userspace and handled there, which works OK but much more slowly
+ * than if the datapath handled it directly.) */
enum slow_path_reason
commit_odp_actions(const struct flow *flow, struct flow *base,
struct ofpbuf *odp_actions, struct flow_wildcards *wc,
- bool use_masked)
+ bool use_masked, bool pending_encap, bool pending_decap,
+ struct ofpbuf *encap_data)
{
+ /* If you add a field that OpenFlow actions can change, and that is visible
+ * to the datapath (including all data fields), then you should also add
+ * code here to commit changes to the field. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);
+
enum slow_path_reason slow1, slow2;
bool mpls_done = false;
- commit_ether_action(flow, base, odp_actions, wc, use_masked);
+ commit_encap_decap_action(flow, base, odp_actions, wc,
+ pending_encap, pending_decap, encap_data);
+ commit_set_ether_action(flow, base, odp_actions, wc, use_masked);
/* Make packet a non-MPLS packet before committing L3/4 actions,
* which would otherwise do nothing. */
if (eth_type_mpls(base->dl_type) && !eth_type_mpls(flow->dl_type)) {
commit_mpls_action(flow, base, odp_actions);
mpls_done = true;
}
+ commit_set_nsh_action(flow, base, odp_actions, wc, use_masked);
slow1 = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
commit_set_port_action(flow, base, odp_actions, wc, use_masked);
slow2 = commit_set_icmp_action(flow, base, odp_actions, wc);