// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>
#include <net/pkt_cls.h>
#include <net/netfilter/nf_conntrack_zones.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec64 cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts64(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}

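/* Grab the 16-bit word of the TCP header that holds the data offset and
 * flags, and mask off the 4-bit data offset so that only the flag and
 * reserved bits remain, still in network byte order.
 */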
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

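/* Statistics updates prefer a CPU-local stats block.  If none exists yet,
 * the block pre-allocated for CPU 0 is used, and a node-local block is
 * allocated once a second CPU starts writing, so that later updates avoid
 * cross-CPU lock contention.
 */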
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct sw_flow_stats *stats;
	unsigned int cpu = smp_processor_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[cpu]);

	/* Check if already have CPU-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
			flow->stats_last_writer = cpu;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current CPU is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != cpu)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If CPU-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != -1) &&
			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
				/* Try to allocate CPU-specific stats. */
				struct sw_flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      numa_node_id());
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[cpu],
							   new_stats);
					cpumask_set_cpu(cpu,
							&flow->cpu_used_mask);
					goto unlock;
				}
			}
			flow->stats_last_writer = cpu;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int cpu;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu;

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}

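/* Ensure the first @len bytes of the packet are present in the linear data
 * area: -EINVAL if the packet is too short, -ENOMEM if the data cannot be
 * pulled in, 0 on success.
 */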
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}

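/* Parse the IPv6 header and any extension headers.  On success, returns the
 * combined length of the IPv6 header and its extensions and sets the
 * transport header just past them; fragment state is recorded in @key before
 * errors from ipv6_find_hdr() are reported (see the comment on the delayed
 * error handling below).
 */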
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned short frag_off;
	unsigned int payload_ofs = 0;
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int err, nexthdr, flags = 0;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (flags & IP6_FH_F_FRAG) {
		if (frag_off) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			key->ip.proto = nexthdr;
			return 0;
		}
		key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_find_hdr() as it
	 * always sets flags and frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

/**
 * parse_vlan_tag - Parse vlan tag from vlan header.
 * @skb: skb containing frame to parse
 * @key_vh: pointer to parsed vlan tag
 * @untag_vlan: should the vlan header be removed from the frame
 *
 * Return: ERROR on memory error.
 * %0 if it encounters a non-vlan or incomplete packet.
 * %1 after successfully parsing vlan tag.
 */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
			  bool untag_vlan)
{
	struct vlan_head *vh = (struct vlan_head *)skb->data;

	if (likely(!eth_type_vlan(vh->tpid)))
		return 0;

	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
				    sizeof(__be16))))
		return -ENOMEM;

	vh = (struct vlan_head *)skb->data;
	key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
	key_vh->tpid = vh->tpid;

	if (unlikely(untag_vlan)) {
		int offset = skb->data - skb_mac_header(skb);
		u16 tci;
		int err;

		__skb_push(skb, offset);
		err = __skb_vlan_pop(skb, &tci);
		__skb_pull(skb, offset);
		if (err)
			return err;
		__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
	} else {
		__skb_pull(skb, sizeof(struct vlan_head));
	}
	return 1;
}

static void clear_vlan(struct sw_flow_key *key)
{
	key->eth.vlan.tci = 0;
	key->eth.vlan.tpid = 0;
	key->eth.cvlan.tci = 0;
	key->eth.cvlan.tpid = 0;
}

static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	if (skb_vlan_tag_present(skb)) {
		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
		key->eth.vlan.tpid = skb->vlan_proto;
	} else {
		/* Parse outer vlan tag in the non-accelerated case. */
		res = parse_vlan_tag(skb, &key->eth.vlan, true);
		if (res <= 0)
			return res;
	}

	/* Parse inner vlan tag. */
	res = parse_vlan_tag(skb, &key->eth.cvlan, false);
	if (res <= 0)
		return res;

	return 0;
}

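/* Determine the Ethertype of the frame, pulling skb->data past the 2-byte
 * type/length field and past an LLC/SNAP header when one is present.
 * Frames without a usable SNAP-encapsulated Ethertype map to ETH_P_802_2;
 * htons(0) signals a memory error.
 */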
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}

static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}

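/* Parse a Network Service Header (NSH) into the flow key.  The base header
 * is validated first so that the advertised header length can be trusted
 * before the remainder, including any metadata, is pulled in.
 */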
static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct nshhdr *nh;
	unsigned int nh_ofs = skb_network_offset(skb);
	u8 version, length;
	int err;

	err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	version = nsh_get_ver(nh);
	length = nsh_hdr_len(nh);

	if (version != 0)
		return -EINVAL;

	err = check_header(skb, nh_ofs + length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	key->nsh.base.flags = nsh_get_flags(nh);
	key->nsh.base.ttl = nsh_get_ttl(nh);
	key->nsh.base.mdtype = nh->mdtype;
	key->nsh.base.np = nh->np;
	key->nsh.base.path_hdr = nh->path_hdr;
	switch (key->nsh.base.mdtype) {
	case NSH_M_TYPE1:
		if (length != NSH_M_TYPE1_LEN)
			return -EINVAL;
		memcpy(key->nsh.context, nh->md1.context,
		       sizeof(nh->md1));
		break;
	case NSH_M_TYPE2:
		memset(key->nsh.context, 0,
		       sizeof(nh->md1));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * key_extract_l3l4 - extracts L3/L4 header information.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       L3 header
 * @key: output flow key
 *
 * Return: %0 if successful, otherwise a negative errno value.
 */
static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		u8 label_count = 1;

		memset(&key->mpls, 0, sizeof(key->mpls));
		skb_set_inner_network_header(skb, skb->mac_len);
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len +
					     label_count * MPLS_HLEN);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);

			if (label_count <= MPLS_LABEL_DEPTH)
				memcpy(&key->mpls.lse[label_count - 1], &lse,
				       MPLS_HLEN);

			skb_set_inner_network_header(skb, skb->mac_len +
						     label_count * MPLS_HLEN);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			label_count++;
		}
		if (label_count > MPLS_LABEL_DEPTH)
			label_count = MPLS_LABEL_DEPTH;

		key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				fallthrough;
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_NSH)) {
		error = parse_nsh(skb, key);
		if (error)
			return error;
	}
	return 0;
}

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Initializes @skb header fields as follows:
 *
 *    - skb->mac_header: the L2 header.
 *
 *    - skb->network_header: just past the L2 header, or just past the
 *      VLAN header, to the first byte of the L2 payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 *
 *    - skb->protocol: the type of the data starting at skb->network_header.
 *      Equals to key->eth.type.
 *
 * Return: %0 if successful, otherwise a negative errno value.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. */
	clear_vlan(key);
	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
		if (unlikely(eth_type_vlan(skb->protocol)))
			return -EINVAL;

		skb_reset_network_header(skb);
		key->eth.type = skb->protocol;
	} else {
		eth = eth_hdr(skb);
		ether_addr_copy(key->eth.src, eth->h_source);
		ether_addr_copy(key->eth.dst, eth->h_dest);

		__skb_pull(skb, 2 * ETH_ALEN);
		/* We are going to push all headers that we pull, so no need to
		 * update skb->csum here.
		 */

		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

		key->eth.type = parse_ethertype(skb);
		if (unlikely(key->eth.type == htons(0)))
			return -ENOMEM;

		/* Multiple tagged packets need to retain TPID to satisfy
		 * skb_vlan_pop(), which will later shift the ethertype into
		 * skb->protocol.
		 */
		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
			skb->protocol = key->eth.cvlan.tpid;
		else
			skb->protocol = key->eth.type;

		skb_reset_network_header(skb);
		__skb_push(skb, skb->data - skb_mac_header(skb));
	}

	skb_reset_mac_len(skb);

	/* Fill out L3/L4 key info, if any */
	return key_extract_l3l4(skb, key);
}

/* In the case of conntrack fragment handling it expects L3 headers,
 * add a helper.
 */
int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract_l3l4(skb, key);
}

int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	res = key_extract(skb, key);
	if (!res)
		key->mac_proto &= ~SW_FLOW_KEY_INVALID;

	return res;
}

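/* Map the receiving device type to the MAC protocol used for extraction:
 * Ethernet devices carry Ethernet frames, while ARPHRD_NONE devices carry
 * raw L3 packets unless the protocol is Ethernet-in-tunnel (ETH_P_TEB).
 */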
static int key_extract_mac_proto(struct sk_buff *skb)
{
	switch (skb->dev->type) {
	case ARPHRD_ETHER:
		return MAC_PROTO_ETHERNET;
	case ARPHRD_NONE:
		if (skb->protocol == htons(ETH_P_TEB))
			return MAC_PROTO_ETHERNET;
		return MAC_PROTO_NONE;
	}
	WARN_ON_ONCE(1);
	return -EINVAL;
}

int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *tc_ext;
#endif
	bool post_ct = false, post_ct_snat = false, post_ct_dnat = false;
	int res, err;
	u16 zone = 0;

	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	key->ovs_flow_hash = 0;
	res = key_extract_mac_proto(skb);
	if (res < 0)
		return res;
	key->mac_proto = res;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (static_branch_unlikely(&tc_recirc_sharing_support)) {
		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
		key->recirc_id = tc_ext ? tc_ext->chain : 0;
		OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
		post_ct = tc_ext ? tc_ext->post_ct : false;
		post_ct_snat = post_ct ? tc_ext->post_ct_snat : false;
		post_ct_dnat = post_ct ? tc_ext->post_ct_dnat : false;
		zone = post_ct ? tc_ext->zone : 0;
	} else {
		key->recirc_id = 0;
	}
#else
	key->recirc_id = 0;
#endif

	err = key_extract(skb, key);
	if (!err) {
		ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */
		if (post_ct) {
			if (!skb_get_nfct(skb)) {
				key->ct_zone = zone;
			} else {
				if (!post_ct_dnat)
					key->ct_state &= ~OVS_CS_F_DST_NAT;
				if (!post_ct_snat)
					key->ct_state &= ~OVS_CS_F_SRC_NAT;
			}
		}
	}
	return err;
}

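/* Like ovs_flow_key_extract(), but for packets constructed from userspace
 * flow attributes: metadata is taken from the given netlink attributes
 * rather than from the skb and its input vport.
 */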
int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs, log);
	if (err)
		return -EINVAL;

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
	if (err)
		return err;

	/* key_extract assumes that skb->protocol is set-up for
	 * layer 3 packets which is the case for other callers,
	 * in particular packets received from the network stack.
	 * Here the correct value can be set from the metadata
	 * extracted above.
	 * For L2 packet key eth type would be zero. skb protocol
	 * would be set to correct value later during key-extract.
	 */

	skb->protocol = key->eth.type;
	err = key_extract(skb, key);
	if (err)
		return err;

	/* Check that we have conntrack original direction tuple metadata only
	 * for packets for which it makes sense.  Otherwise the key may be
	 * corrupted due to overlapping key fields.
	 */
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
	    key->eth.type != htons(ETH_P_IP))
		return -EINVAL;
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
	    (key->eth.type != htons(ETH_P_IPV6) ||
	     sw_flow_key_is_nd(key)))
		return -EINVAL;

	return 0;
}