const struct ovs_key_nd *mask)
{
const struct ovs_nd_msg *ns = dp_packet_l4(packet);
- const struct ovs_nd_opt *nd_opt = dp_packet_get_nd_payload(packet);
+ const struct ovs_nd_lla_opt *lla_opt = dp_packet_get_nd_payload(packet);
- if (OVS_LIKELY(ns && nd_opt)) {
+ if (OVS_LIKELY(ns && lla_opt)) {
int bytes_remain = dp_packet_l4_size(packet) - sizeof(*ns);
struct in6_addr tgt_buf;
struct eth_addr sll_buf = eth_addr_zero;
struct eth_addr tll_buf = eth_addr_zero;
- while (bytes_remain >= ND_OPT_LEN && nd_opt->nd_opt_len != 0) {
- if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
- && nd_opt->nd_opt_len == 1) {
- sll_buf = nd_opt->nd_opt_mac;
+ while (bytes_remain >= ND_LLA_OPT_LEN && lla_opt->len != 0) {
+ if (lla_opt->type == ND_OPT_SOURCE_LINKADDR
+ && lla_opt->len == 1) {
+ sll_buf = lla_opt->mac;
ether_addr_copy_masked(&sll_buf, key->nd_sll, mask->nd_sll);
/* A packet can only contain one SLL or TLL option */
break;
- } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
- && nd_opt->nd_opt_len == 1) {
- tll_buf = nd_opt->nd_opt_mac;
+ } else if (lla_opt->type == ND_OPT_TARGET_LINKADDR
+ && lla_opt->len == 1) {
+ tll_buf = lla_opt->mac;
ether_addr_copy_masked(&tll_buf, key->nd_tll, mask->nd_tll);
/* A packet can only contain one SLL or TLL option */
break;
}
- nd_opt += nd_opt->nd_opt_len;
- bytes_remain -= nd_opt->nd_opt_len * ND_OPT_LEN;
+ lla_opt += lla_opt->len;
+ bytes_remain -= lla_opt->len * ND_LLA_OPT_LEN;
}
packet_set_nd(packet,
}
}
+/* Set the NSH header. Assumes the NSH header is present and matches the
+ * MD format of the key. The slow path must take care of that. */
+static void
+odp_set_nsh(struct dp_packet *packet, const struct ovs_key_nsh *key,
+            const struct ovs_key_nsh *mask)
+{
+    /* NSH sits at the L3 offset after an NSH push/encap; the header may be
+     * only 16-bit aligned, hence the get/put_16aligned_be32() accessors. */
+    struct nsh_hdr *nsh = dp_packet_l3(packet);
+
+    if (!mask) {
+        /* Unmasked set: overwrite the flags bits, the path header, and (for
+         * MD type 1) all four fixed context headers with the key values.
+         * Version and length bits of ver_flags_len are preserved. */
+        nsh->ver_flags_len = htons(key->flags << NSH_FLAGS_SHIFT) |
+                             (nsh->ver_flags_len & ~htons(NSH_FLAGS_MASK));
+        put_16aligned_be32(&nsh->path_hdr, key->path_hdr);
+        switch (nsh->md_type) {
+            case NSH_M_TYPE1:
+                /* MD type 1 carries exactly four fixed 32-bit context
+                 * headers (md1.c[0..3]). */
+                for (int i = 0; i < 4; i++) {
+                    put_16aligned_be32(&nsh->md1.c[i], key->c[i]);
+                }
+                break;
+            case NSH_M_TYPE2:
+            default:
+                /* No support for setting any other metadata format yet. */
+                break;
+        }
+    } else {
+        /* Masked set: each field becomes key | (old & ~mask).
+         * NOTE(review): the key is OR'd in without applying the mask, so
+         * this presumes the caller supplies key already AND'ed with mask —
+         * confirm against the flow translation code. */
+        uint8_t flags = (ntohs(nsh->ver_flags_len) & NSH_FLAGS_MASK) >>
+                            NSH_FLAGS_SHIFT;
+        flags = key->flags | (flags & ~mask->flags);
+        nsh->ver_flags_len = htons(flags << NSH_FLAGS_SHIFT) |
+                             (nsh->ver_flags_len & ~htons(NSH_FLAGS_MASK));
+
+        ovs_be32 path_hdr = get_16aligned_be32(&nsh->path_hdr);
+        path_hdr = key->path_hdr | (path_hdr & ~mask->path_hdr);
+        put_16aligned_be32(&nsh->path_hdr, path_hdr);
+        switch (nsh->md_type) {
+            case NSH_M_TYPE1:
+                for (int i = 0; i < 4; i++) {
+                    /* k | (p & ~m): keep unmasked bits of the packet. */
+                    ovs_be32 p = get_16aligned_be32(&nsh->md1.c[i]);
+                    ovs_be32 k = key->c[i];
+                    ovs_be32 m = mask->c[i];
+                    put_16aligned_be32(&nsh->md1.c[i], k | (p & ~m));
+                }
+                break;
+            case NSH_M_TYPE2:
+            default:
+                /* No support for setting any other metadata format yet. */
+                break;
+        }
+    }
+}
+
static void
odp_execute_set_action(struct dp_packet *packet, const struct nlattr *a)
{
odp_eth_set_addrs(packet, nl_attr_get(a), NULL);
break;
+ case OVS_KEY_ATTR_NSH:
+ odp_set_nsh(packet, nl_attr_get(a), NULL);
+ break;
+
case OVS_KEY_ATTR_IPV4:
ipv4_key = nl_attr_get_unspec(a, sizeof(struct ovs_key_ipv4));
packet_set_ipv4(packet, ipv4_key->ipv4_src,
break;
case OVS_KEY_ATTR_UNSPEC:
+ case OVS_KEY_ATTR_PACKET_TYPE:
case OVS_KEY_ATTR_ENCAP:
case OVS_KEY_ATTR_ETHERTYPE:
case OVS_KEY_ATTR_IN_PORT:
get_mask(a, struct ovs_key_ethernet));
break;
+ case OVS_KEY_ATTR_NSH:
+ odp_set_nsh(packet, nl_attr_get(a),
+ get_mask(a, struct ovs_key_nsh));
+ break;
+
case OVS_KEY_ATTR_IPV4:
odp_set_ipv4(packet, nl_attr_get(a),
get_mask(a, struct ovs_key_ipv4));
break;
case OVS_KEY_ATTR_TUNNEL: /* Masked data not supported for tunnel. */
+ case OVS_KEY_ATTR_PACKET_TYPE:
case OVS_KEY_ATTR_UNSPEC:
case OVS_KEY_ATTR_CT_STATE:
case OVS_KEY_ATTR_CT_ZONE:
case OVS_ACTION_ATTR_PUSH_ETH:
case OVS_ACTION_ATTR_POP_ETH:
case OVS_ACTION_ATTR_CLONE:
+ case OVS_ACTION_ATTR_ENCAP_NSH:
+ case OVS_ACTION_ATTR_DECAP_NSH:
return false;
case OVS_ACTION_ATTR_UNSPEC:
uint32_t hash;
DP_PACKET_BATCH_FOR_EACH (packet, batch) {
- flow_extract(packet, &flow);
- hash = flow_hash_5tuple(&flow, hash_act->hash_basis);
+ /* RSS hash can be used here instead of 5tuple for
+ * performance reasons. */
+ if (dp_packet_rss_valid(packet)) {
+ hash = dp_packet_get_rss_hash(packet);
+ hash = hash_int(hash, hash_act->hash_basis);
+ } else {
+ flow_extract(packet, &flow);
+ hash = flow_hash_5tuple(&flow, hash_act->hash_basis);
+ }
packet->md.dp_hash = hash;
}
} else {
case OVS_ACTION_ATTR_METER:
/* Not implemented yet. */
break;
+ case OVS_ACTION_ATTR_PUSH_ETH: {
+ const struct ovs_action_push_eth *eth = nl_attr_get(a);
+
+ DP_PACKET_BATCH_FOR_EACH (packet, batch) {
+ push_eth(packet, ð->addresses.eth_dst,
+ ð->addresses.eth_src);
+ }
+ break;
+ }
+
+ case OVS_ACTION_ATTR_POP_ETH:
+ DP_PACKET_BATCH_FOR_EACH (packet, batch) {
+ pop_eth(packet);
+ }
+ break;
+
+ case OVS_ACTION_ATTR_ENCAP_NSH: {
+ const struct ovs_action_encap_nsh *enc_nsh = nl_attr_get(a);
+ DP_PACKET_BATCH_FOR_EACH (packet, batch) {
+ encap_nsh(packet, enc_nsh);
+ }
+ break;
+ }
+ case OVS_ACTION_ATTR_DECAP_NSH: {
+ size_t i, num = batch->count;
+
+ DP_PACKET_BATCH_REFILL_FOR_EACH (i, num, packet, batch) {
+ if (decap_nsh(packet)) {
+ dp_packet_batch_refill(batch, packet, i);
+ } else {
+ dp_packet_delete(packet);
+ }
+ }
+ break;
+ }
case OVS_ACTION_ATTR_OUTPUT:
case OVS_ACTION_ATTR_TUNNEL_PUSH:
case OVS_ACTION_ATTR_USERSPACE:
case OVS_ACTION_ATTR_RECIRC:
case OVS_ACTION_ATTR_CT:
- case OVS_ACTION_ATTR_PUSH_ETH:
- case OVS_ACTION_ATTR_POP_ETH:
case OVS_ACTION_ATTR_UNSPEC:
case __OVS_ACTION_ATTR_MAX:
OVS_NOT_REACHED();