/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>

#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
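/* TCP_FLAGS_BE16() reads the 16-bit word of the TCP header that holds the
 * data offset and flag bits and masks off the low 12 bits (the flags),
 * keeping network byte order so the result can be OR-ed directly into
 * stats->tcp_flags and key->tp.flags.
 */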
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct flow_stats *stats;
	int node = numa_node_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[node]);

	/* Check if already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA-node is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again. If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_access_pointer(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}
/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int node;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	for_each_node(node) {
		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}
/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int node;

	for_each_node(node) {
		struct flow_stats *stats = ovsl_dereference(flow->stats[node]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}
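/* The helpers below validate that a protocol header is complete: the header
 * must lie within skb->len and be reachable in the linear data area, which
 * pskb_may_pull() guarantees before any of the parse functions dereference
 * it.
 */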
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}
static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}
static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
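/* parse_ipv6hdr() fills in the IPv6 portion of the flow key and sets the
 * transport header.  It returns the total length of the IPv6 header plus any
 * extension headers, or a negative errno on error.
 */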
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int payload_ofs;
	__be16 frag_off;
	u8 nexthdr;
	int err;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
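/* parse_vlan() is called with skb->data pointing at the 802.1Q TPID
 * (key_extract() has already pulled the two Ethernet addresses).  It records
 * the TCI in the flow key and pulls the tag so that parse_ethertype() sees
 * the encapsulated ethertype next.
 */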
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
				    sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
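/* parse_ethertype() distinguishes Ethernet II from 802.3 framing: a value at
 * skb->data that is a genuine ethertype is returned as-is, otherwise the
 * frame is treated as 802.3 with an 802.2 LLC/SNAP header and the
 * SNAP-encapsulated ethertype is used, falling back to ETH_P_802_2.
 */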
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided. It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return -EINVAL;
}
/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	key->eth.tci = 0;
	if (skb_vlan_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		/* In the presence of an MPLS label stack the end of the L2
		 * header and the beginning of the L3 header differ.
		 *
		 * Advance network_header to the beginning of the L3
		 * header. mac_len corresponds to the end of the L2 header.
		 */
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}

	return 0;
}
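/* ovs_flow_key_update() re-runs key_extract() on a packet whose headers may
 * have been rewritten by actions; the metadata portion of @key (tunnel,
 * input port, mark, ...) is left untouched.
 */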
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract(skb, key);
}
int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
	/* Extract metadata from packet. */
	if (tun_info) {
		memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));

		BUILD_BUG_ON(((1 << (sizeof(tun_info->options_len) * 8)) - 1) >
			     sizeof(key->tun_opts));

		if (tun_info->options) {
			memcpy(TUN_METADATA_OPTS(key, tun_info->options_len),
			       tun_info->options, tun_info->options_len);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	key->ovs_flow_hash = 0;
	key->recirc_id = 0;

	return key_extract(skb, key);
}
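/* Variant used when a packet is injected from userspace (packet execute):
 * the metadata fields of @key come from netlink attributes rather than from
 * the receive path.
 */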
int ovs_flow_key_extract_userspace(const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	int err;

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(attr, key, log);
	if (err)
		return err;

	return key_extract(skb, key);
}