/* TCP flags in the first half of a BE32, zeroes in the other half. */
BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) + 2
- == offsetof(struct flow, pad) &&
+ == offsetof(struct flow, pad2) &&
offsetof(struct flow, tcp_flags) / 4
- == offsetof(struct flow, pad) / 4);
+ == offsetof(struct flow, pad2) / 4);
#if WORDS_BIGENDIAN
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
<< 16)
* away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 27)
+#if (FLOW_WC_SEQ != 28)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
"assertions enabled. Consider updating FLOW_WC_SEQ after "
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
fmd->dp_hash = flow->dp_hash;
fmd->recirc_id = flow->recirc_id;
{
memset(&wc->masks, 0x0, sizeof wc->masks);
+ /* Update this function whenever struct flow changes. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
+
if (flow->tunnel.ip_dst) {
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
WC_MASK_FIELD(wc, tunnel.tun_id);
WC_MASK_FIELD(wc, dp_hash);
WC_MASK_FIELD(wc, in_port);
+ /* actset_output wildcarded. */
+
WC_MASK_FIELD(wc, dl_dst);
WC_MASK_FIELD(wc, dl_src);
WC_MASK_FIELD(wc, dl_type);
}
}
+/* Return a map of possible fields for a packet of the same type as 'flow'.
+ * Including extra bits in the returned mask is not wrong, it is just less
+ * optimal.
+ *
+ * This is a less precise version of flow_wildcards_init_for_packet() above. */
+uint64_t
+flow_wc_map(const struct flow *flow)
+{
+ /* Update this function whenever struct flow changes. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
+
+ uint64_t map = (flow->tunnel.ip_dst) ? MINIFLOW_MAP(tunnel) : 0;
+
+ /* Metadata fields that can appear on packet input. */
+ map |= MINIFLOW_MAP(skb_priority) | MINIFLOW_MAP(pkt_mark)
+ | MINIFLOW_MAP(recirc_id) | MINIFLOW_MAP(dp_hash)
+ | MINIFLOW_MAP(in_port)
+ | MINIFLOW_MAP(dl_dst) | MINIFLOW_MAP(dl_src)
+ | MINIFLOW_MAP(dl_type) | MINIFLOW_MAP(vlan_tci);
+
+ /* Ethertype-dependent fields. */
+ if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
+ map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
+ | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
+ | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
+ if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
+ map |= MINIFLOW_MAP(igmp_group_ip4);
+ } else {
+ map |= MINIFLOW_MAP(tcp_flags)
+ | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
+ }
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ map |= MINIFLOW_MAP(ipv6_src) | MINIFLOW_MAP(ipv6_dst)
+ | MINIFLOW_MAP(ipv6_label)
+ | MINIFLOW_MAP(nw_proto) | MINIFLOW_MAP(nw_frag)
+ | MINIFLOW_MAP(nw_tos) | MINIFLOW_MAP(nw_ttl);
+ if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
+ map |= MINIFLOW_MAP(nd_target)
+ | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
+ } else {
+ map |= MINIFLOW_MAP(tcp_flags)
+ | MINIFLOW_MAP(tp_src) | MINIFLOW_MAP(tp_dst);
+ }
+ } else if (eth_type_mpls(flow->dl_type)) {
+ map |= MINIFLOW_MAP(mpls_lse);
+ } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
+ flow->dl_type == htons(ETH_TYPE_RARP)) {
+ map |= MINIFLOW_MAP(nw_src) | MINIFLOW_MAP(nw_dst)
+ | MINIFLOW_MAP(nw_proto)
+ | MINIFLOW_MAP(arp_sha) | MINIFLOW_MAP(arp_tha);
+ }
+
+ return map;
+}
+
/* Clear the metadata and register wildcard masks. They are not packet
* header fields. */
void
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
+ /* Update this function whenever struct flow changes. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
+
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
+ wc->masks.actset_output = 0;
}
/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
int
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
{
- if (wc) {
- wc->masks.dl_type = OVS_BE16_MAX;
- }
+ /* dl_type is always masked. */
if (eth_type_mpls(flow->dl_type)) {
int i;
int len = FLOW_MAX_MPLS_LABELS;
*
* - BoS: 1.
*
- * If the new label is the second or label MPLS label in 'flow', it is
+ * If the new label is the second or later MPLS label in 'flow', it is
* generated as;
*
* - label: Copied from outer label.
ovs_assert(eth_type_mpls(mpls_eth_type));
ovs_assert(n < FLOW_MAX_MPLS_LABELS);
- memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
if (n) {
int i;
+ if (wc) {
+ memset(&wc->masks.mpls_lse, 0xff, sizeof *wc->masks.mpls_lse * n);
+ }
for (i = n; i >= 1; i--) {
flow->mpls_lse[i] = flow->mpls_lse[i - 1];
}
- flow->mpls_lse[0] = (flow->mpls_lse[1]
- & htonl(~MPLS_BOS_MASK));
+ flow->mpls_lse[0] = (flow->mpls_lse[1] & htonl(~MPLS_BOS_MASK));
} else {
int label = 0; /* IPv4 Explicit Null. */
int tc = 0;
if (is_ip_any(flow)) {
tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
- wc->masks.nw_tos |= IP_DSCP_MASK;
+ if (wc) {
+ wc->masks.nw_tos |= IP_DSCP_MASK;
+ wc->masks.nw_ttl = 0xff;
+ }
if (flow->nw_ttl) {
ttl = flow->nw_ttl;
}
- wc->masks.nw_ttl = 0xff;
}
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
/* Clear all L3 and L4 fields. */
- BUILD_ASSERT(FLOW_WC_SEQ == 27);
+ BUILD_ASSERT(FLOW_WC_SEQ == 28);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
}
if (n == 0) {
/* Nothing to pop. */
return false;
- } else if (n == FLOW_MAX_MPLS_LABELS
- && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
- /* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */
- return false;
+ } else if (n == FLOW_MAX_MPLS_LABELS) {
+ if (wc) {
+ wc->masks.mpls_lse[n - 1] |= htonl(MPLS_BOS_MASK);
+ }
+ if (!(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
+            /* Can't pop: we don't know what to fill in mpls_lse[n - 1]. */
+ return false;
+ }
}
- memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
+ if (wc) {
+ memset(&wc->masks.mpls_lse[1], 0xff,
+ sizeof *wc->masks.mpls_lse * (n - 1));
+ }
for (i = 1; i < n; i++) {
flow->mpls_lse[i - 1] = flow->mpls_lse[i];
}