/*
 * Copyright (c) 2007-2011 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;

static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}

u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}

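/* Note: ovs_flow_used_time() turns "jiffies since last use" into an absolute
 * timestamp by taking the current CLOCK_MONOTONIC time in milliseconds and
 * backing off by the idle interval.  For example, if the current time is
 * 50000 ms and the flow was last used 3000 ms ago, it reports 47000 ms. */
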
#define SW_FLOW_KEY_OFFSET(field)		\
	(offsetof(struct sw_flow_key, field) +	\
	 FIELD_SIZEOF(struct sw_flow_key, field))

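/* SW_FLOW_KEY_OFFSET(field) yields the number of bytes of a struct
 * sw_flow_key that are meaningful once 'field' has been filled in: the
 * field's offset plus its size.  For instance, SW_FLOW_KEY_OFFSET(ipv4.addr)
 * covers everything up to and including the IPv4 addresses, so hashing and
 * comparison can stop there for flows with no L4 information. */
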
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

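/* Byte 13 of the TCP header holds the flag bits; the low six bits are FIN,
 * SYN, RST, PSH, ACK and URG, which is all that this module accumulates. */
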
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}

struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;

	if (actions_len > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	atomic_set(&flow->refcnt, 1);
	flow->sf_acts = NULL;
	flow->dead = false;

	return flow;
}

static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));
}

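/* Masking with (n_buckets - 1) only selects a valid bucket because table
 * sizes are expected to be powers of two (ovs_flow_tbl_expand() doubles the
 * size).  Re-hashing with the per-table seed keeps the bucket distribution
 * independent across tables. */
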
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;
	table->node_ver = 0;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));

	return table;
}

static void flow_free(struct sw_flow *flow)
{
	flow->dead = true;
	ovs_flow_put(flow);
}

void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			flow_free(flow);
		}
	}

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	ovs_flow_tbl_destroy(table);
}

void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
	if (!table)
		return;

	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}

struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

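/* Example (sketch): iterating over every flow in a table with the cursor
 * pair that ovs_flow_tbl_next() maintains:
 *
 *	u32 bucket = 0, obj = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_next(table, &bucket, &obj)))
 *		...emit 'flow'...
 *
 * The caller owns the two cursors, so e.g. a Netlink dump can stash them
 * between calls and resume where it left off. */
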
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
			ovs_flow_tbl_insert(new, flow);
	}
	old->keep_flows = true;
}

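/* Each sw_flow carries two hash_node links, indexed by node_ver.  Linking
 * the flows into the new table through the *other* index leaves the old
 * table's linkage untouched, so concurrent RCU readers can keep traversing
 * it during a rehash.  Setting keep_flows tells ovs_flow_tbl_destroy() that
 * the old table no longer owns the flows themselves. */
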
static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
	struct flow_table *new_table;

	new_table = ovs_flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(table, new_table);

	return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}

/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow->dead = true;
	ovs_flow_put(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}

void ovs_flow_hold(struct sw_flow *flow)
{
	atomic_inc(&flow->refcnt);
}

void ovs_flow_put(struct sw_flow *flow)
{
	if (unlikely(!flow))
		return;

	if (atomic_dec_and_test(&flow->refcnt)) {
		kfree((struct sw_flow_actions __force *)flow->sf_acts);
		kmem_cache_free(flow_cache, flow);
	}
}

/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}

static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}

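/* Values of 1536 (0x600) and above in the 802.3 length/type field are
 * EtherTypes; smaller values are 802.3 frame lengths.  That is why anything
 * below the threshold is probed for an LLC/SNAP header and otherwise mapped
 * to ETH_P_802_2. */
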
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);
	int error = 0;
	int key_len;

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			goto out;
		if (unlikely(skb_linearize(skb))) {
			error = -ENOMEM;
			goto out;
		}

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	goto out;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
	*key_lenp = key_len;
	return error;
}

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->dl_type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->dl_type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		     int *key_lenp)
{
	int error;
	struct ethhdr *eth;
	int key_len = SW_FLOW_KEY_OFFSET(eth);

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	key->phy.tun_id = OVS_CB(skb)->tun_id;
	key->phy.in_port = in_port;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));
	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			goto out;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			goto out;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
		    && arp->ar_pro == htons(ETH_P_IP)
		    && arp->ar_hln == ETH_ALEN
		    && arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);

			if (key->ip.proto == ARPOP_REQUEST
			    || key->ip.proto == ARPOP_REPLY) {
				memcpy(&key->ipv4.addr.src, arp->ar_sip,
				       sizeof(key->ipv4.addr.src));
				memcpy(&key->ipv4.addr.dst, arp->ar_tip,
				       sizeof(key->ipv4.addr.dst));
				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
			}
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			goto out;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			goto out;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
				if (error < 0)
					goto out;
			}
		}
	}

	error = 0;

out:
	*key_lenp = key_len;
	return error;
}

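/* Example (sketch, caller side): the receive path typically extracts a key
 * and immediately uses it for a lookup.  'vport' and 'table' are
 * illustrative names, not defined in this file:
 *
 *	struct sw_flow_key key;
 *	int key_len;
 *
 *	if (ovs_flow_extract(skb, vport->port_no, &key, &key_len))
 *		return;
 *	flow = ovs_flow_tbl_lookup(table, &key, key_len);
 */
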
u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}

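/* Hashing DIV_ROUND_UP(key_len, 4) 32-bit words may read a few bytes past
 * key_len, but never past the struct; that is deterministic only because
 * ovs_flow_extract() and ovs_flow_from_nlattrs() start from a zeroed key,
 * which also makes the memcmp() in ovs_flow_tbl_lookup() meaningful. */
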
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
				    struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;

	hash = ovs_flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {

		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return NULL;
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
	BUG_ON(table->count < 0);
}

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),

	[OVS_KEY_ATTR_TUN_ID] = sizeof(__be64),
};

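/* A length of -1 (OVS_KEY_ATTR_ENCAP) marks a variable-length, nested
 * attribute: parse_flow_nlattrs() below skips the exact-length check for
 * it, and ovs_flow_from_nlattrs() re-parses the nested contents itself. */
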
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
{
	const struct ovs_key_icmp *icmp_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv4.tp.src = tcp_key->tcp_src;
		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv4.tp.src = udp_key->udp_src;
		swkey->ipv4.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
		break;
	}

	return 0;
}

static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u64 *attrs)
{
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv6.tp.src = tcp_key->tcp_src;
		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv6.tp.src = udp_key->udp_src;
		swkey->ipv6.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMPV6:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			const struct ovs_key_nd *nd_key;

			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
				return -EINVAL;
			*attrs &= ~(1 << OVS_KEY_ATTR_ND);

			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
		}
		break;
	}

	return 0;
}

static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = 0;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX || attrs & (1ULL << type))
			return -EINVAL;

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;

		attrs |= 1ULL << type;
		a[type] = nla;
	}
	if (rem)
		return -EINVAL;

	*attrsp = attrs;
	return 0;
}

/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
			  const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;
	int key_len;
	u64 attrs;
	int err;

	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return err;

	/* Metadata attributes. */
	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}
	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
		if (in_port >= DP_MAX_PORTS)
			return -EINVAL;
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else {
		swkey->phy.in_port = DP_MAX_PORTS;
	}

	if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) {
		swkey->phy.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]);
		attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID);
	}

	/* Data attributes. */
	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
		return -EINVAL;
	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
		const struct nlattr *encap;
		__be16 tci;

		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
			      (1 << OVS_KEY_ATTR_ENCAP)))
			return -EINVAL;

		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;

			err = parse_flow_nlattrs(encap, a, &attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap))
				return -EINVAL;

			swkey->eth.type = htons(ETH_P_8021Q);
			*key_lenp = key_len;
			return 0;
		} else {
			return -EINVAL;
		}
	}

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (ntohs(swkey->eth.type) < 1536)
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else {
		swkey->eth.type = htons(ETH_P_802_2);
	}

	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		if (arp_key->arp_op & htons(0xff00))
			return -EINVAL;
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
	}

	if (attrs)
		return -EINVAL;
	*key_lenp = key_len;

	return 0;
}

/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @priority: receives the extracted skb priority.
 * @in_port: receives the extracted input port.
 * @tun_id: receives the extracted tunnel ID.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of
 * it to get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id,
				   const struct nlattr *attr)
{
	const struct nlattr *nla;
	int rem;

	*in_port = DP_MAX_PORTS;
	*tun_id = 0;
	*priority = 0;

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
			if (nla_len(nla) != ovs_key_lens[type])
				return -EINVAL;

			switch (type) {
			case OVS_KEY_ATTR_PRIORITY:
				*priority = nla_get_u32(nla);
				break;

			case OVS_KEY_ATTR_TUN_ID:
				*tun_id = nla_get_be64(nla);
				break;

			case OVS_KEY_ATTR_IN_PORT:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
					return -EINVAL;
				*in_port = nla_get_u32(nla);
				break;
			}
		}
	}
	if (rem)
		return -EINVAL;

	return 0;
}

int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority &&
	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
		goto nla_put_failure;

	if (swkey->phy.tun_id != cpu_to_be64(0) &&
	    nla_put_be64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun_id))
		goto nla_put_failure;

	if (swkey->phy.in_port != DP_MAX_PORTS &&
	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
		goto nla_put_failure;
	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else
		encap = NULL;

	if (swkey->eth.type == htons(ETH_P_802_2))
		goto unencap;

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
		goto nla_put_failure;
	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	}
	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}