/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>

#include "checksum.h"
#include "datapath.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              const struct nlattr *attr, int len, bool keep_skb);

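/* Make the first 'write_len' bytes of 'skb' writable: a cloned skb still
 * shares its data area, so the header must be unshared (copied) before
 * any field can be modified in place. */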
static int make_writable(struct sk_buff *skb, int write_len)
{
        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;

        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* Remove the VLAN header from the packet and update the csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
        struct vlan_hdr *vhdr;
        int err;

        err = make_writable(skb, VLAN_ETH_HLEN);
        if (unlikely(err))
                return err;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + (2 * ETH_ALEN), VLAN_HLEN, 0));

        vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
        *current_tci = vhdr->h_vlan_TCI;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
        __skb_pull(skb, VLAN_HLEN);

        vlan_set_encap_proto(skb, vhdr);
        skb->mac_header += VLAN_HLEN;
        skb_reset_mac_len(skb);

        return 0;
}

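/* Pop the outermost 802.1Q tag. A tag held in the hw-accel field is
 * simply cleared; a tag embedded in the packet data is stripped. If a
 * second tag follows, it is promoted into the hw-accel field. */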
static int pop_vlan(struct sk_buff *skb)
{
        __be16 tci;
        int err;

        if (likely(vlan_tx_tag_present(skb))) {
                vlan_set_tci(skb, 0);
        } else {
                if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
                             skb->len < VLAN_ETH_HLEN))
                        return 0;

                err = __pop_vlan_tci(skb, &tci);
                if (err)
                        return err;
        }
        /* move next vlan tag to hw accel tag */
        if (likely(skb->protocol != htons(ETH_P_8021Q) ||
                   skb->len < VLAN_ETH_HLEN))
                return 0;

        err = __pop_vlan_tci(skb, &tci);
        if (unlikely(err))
                return err;

        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
        return 0;
}

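/* Push a new outermost 802.1Q tag. An existing hw-accel tag is first
 * written back into the packet data so the new tag can take its place. */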
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
        if (unlikely(vlan_tx_tag_present(skb))) {
                u16 current_tag;

                /* push down current VLAN tag */
                current_tag = vlan_tx_tag_get(skb);

                if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
                        return -ENOMEM;

                if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
                                        + (2 * ETH_ALEN), VLAN_HLEN, 0));
        }
        __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid,
                               ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
        return 0;
}

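/* Rewrite the Ethernet source and destination addresses. For
 * OVS_CSUM_COMPLETE skbs the MAC addresses are covered by skb->csum
 * here, so the old bytes are subtracted and the new ones added back. */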
static int set_eth_addr(struct sk_buff *skb,
                        const struct ovs_key_ethernet *eth_key)
{
        int err;

        err = make_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(eth_hdr(skb),
                                                             ETH_ALEN * 2, 0));

        memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
        memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_add(skb->csum, csum_partial(eth_hdr(skb),
                                                             ETH_ALEN * 2, 0));

        return 0;
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check ||
                            get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         *addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }

        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_rxhash(skb);
        *addr = new_addr;
}

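/* IPv6 has no header checksum, but TCP and UDP checksums cover a
 * pseudo-header containing the addresses, so rewriting an address must
 * patch the L4 checksum. A zero UDP checksum means "no checksum" and is
 * left alone unless the hardware will finish it (OVS_CSUM_PARTIAL). */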
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, 1);
        } else if (l4_proto == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check ||
                            get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_rxhash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

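/* The 8-bit IPv6 traffic class straddles two header fields: its high
 * nibble lives in 'priority' and its low nibble in the top of
 * flow_lbl[0]. The 20-bit flow label fills the rest of flow_lbl[]. */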
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
        nh->priority = tc >> 4;
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
        nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
        nh->flow_lbl[2] = fl & 0x000000FF;
}

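/* csum_replace2() applies the incremental checksum update of RFC 1624;
 * the TTL occupies the high byte of its 16-bit checksum word, hence the
 * << 8 below. */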
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
        struct iphdr *nh;
        int err;

        err = make_writable(skb, skb_network_offset(skb) +
                                 sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        if (ipv4_key->ipv4_src != nh->saddr)
                set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

        if (ipv4_key->ipv4_dst != nh->daddr)
                set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

        if (ipv4_key->ipv4_tos != nh->tos)
                ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

        if (ipv4_key->ipv4_ttl != nh->ttl)
                set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

        return 0;
}

static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
        struct ipv6hdr *nh;
        int err;
        __be32 *saddr;
        __be32 *daddr;

        err = make_writable(skb, skb_network_offset(skb) +
                            sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        saddr = (__be32 *)&nh->saddr;
        daddr = (__be32 *)&nh->daddr;

        if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
                set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
                              ipv6_key->ipv6_src, true);

        if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
                unsigned int offset = 0;
                int flags = OVS_IP6T_FH_F_SKIP_RH;
                bool recalc_csum = true;

                /* With a routing header present, the pseudo-header in the
                 * L4 checksum uses the final destination from the routing
                 * header, so rewriting the base-header address does not
                 * require a checksum update. */
                if (ipv6_ext_hdr(nh->nexthdr))
                        recalc_csum = ipv6_find_hdr(skb, &offset,
                                                    NEXTHDR_ROUTING, NULL,
                                                    &flags) != NEXTHDR_ROUTING;

                set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
                              ipv6_key->ipv6_dst, recalc_csum);
        }

        set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
        set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
        nh->hop_limit = ipv6_key->ipv6_hlimit;

        return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
        skb_clear_rxhash(skb);
}

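/* UDP checksums are optional: a transmitted checksum of zero means
 * "none", so a checksum that computes to zero must be written as
 * CSUM_MANGLED_0 (0xffff). If the packet carries no checksum, or the
 * hardware will compute one later, only the port itself is rewritten. */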
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
        struct udphdr *uh = udp_hdr(skb);

        if (uh->check && get_ip_summed(skb) != OVS_CSUM_PARTIAL) {
                set_tp_port(skb, port, new_port, &uh->check);

                if (!uh->check)
                        uh->check = CSUM_MANGLED_0;
        } else {
                *port = new_port;
                skb_clear_rxhash(skb);
        }
}

static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
        struct udphdr *uh;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        if (udp_port_key->udp_src != uh->source)
                set_udp_port(skb, &uh->source, udp_port_key->udp_src);

        if (udp_port_key->udp_dst != uh->dest)
                set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

        return 0;
}

static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
        struct tcphdr *th;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        if (tcp_port_key->tcp_src != th->source)
                set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

        if (tcp_port_key->tcp_dst != th->dest)
                set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

        return 0;
}

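/* Transmit 'skb' on 'out_port', consuming the skb. The vport lookup is
 * RCU-protected, and the packet is dropped if the port has vanished. */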
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport;

        if (unlikely(!skb))
                return -ENOMEM;

        vport = ovs_vport_rcu(dp, out_port);
        if (unlikely(!vport)) {
                kfree_skb(skb);
                return -ENODEV;
        }

        ovs_vport_send(vport, skb);
        return 0;
}

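/* Deliver the packet to userspace as an OVS_PACKET_CMD_ACTION upcall,
 * carrying any userdata and the Netlink port ID found in the action's
 * nested attributes. */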
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            const struct nlattr *attr)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        BUG_ON(!OVS_CB(skb)->pkt_key);

        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.key = OVS_CB(skb)->pkt_key;
        upcall.userdata = NULL;
        upcall.portid = 0;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;
                }
        }

        return ovs_dp_upcall(dp, skb, &upcall);
}

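/* Execute the nested action list probabilistically: a uniform 32-bit
 * draw at or above the configured threshold skips the sample entirely. */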
static int sample(struct datapath *dp, struct sk_buff *skb,
                  const struct nlattr *attr)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (net_random() >= nla_get_u32(a))
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        return do_execute_actions(dp, skb, nla_data(acts_list),
                                  nla_len(acts_list), true);
}

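/* Dispatch an OVS_ACTION_ATTR_SET action to the setter for the field
 * named by its nested key attribute. */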
static int execute_set_action(struct sk_buff *skb,
                              const struct nlattr *nested_attr)
{
        int err = 0;

        switch (nla_type(nested_attr)) {
        case OVS_KEY_ATTR_PRIORITY:
                skb->priority = nla_get_u32(nested_attr);
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                skb_set_mark(skb, nla_get_u32(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV4_TUNNEL:
                OVS_CB(skb)->tun_key = nla_data(nested_attr);
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, nla_data(nested_attr));
                break;
        }

        return err;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              const struct nlattr *attr, int len, bool keep_skb)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so a clone followed by freeing
         * the original skbuff would be wasteful.  The following code is
         * slightly obscure just to avoid that. */
        int prev_port = -1;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, nla_data(a));
                        if (unlikely(err)) /* skb already freed. */
                                return err;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb);
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, a);
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1) {
                if (keep_skb)
                        skb = skb_clone(skb, GFP_ATOMIC);

                do_output(dp, skb, prev_port);
        } else if (!keep_skb)
                consume_skb(skb);

        return 0;
}

/* We limit the number of times that we pass into ovs_execute_actions()
 * to avoid blowing out the stack in the event that we have a loop. */
#define MAX_LOOPS 5

struct loop_counter {
        u8 count;               /* Count. */
        bool looping;           /* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);

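/* Called when a loop is detected: warn (rate-limited), zero the flow's
 * action list so it stops executing, and report -ELOOP. */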
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
        if (net_ratelimit())
                pr_warn("%s: flow looped %d times, dropping\n",
                        ovs_dp_name(dp), MAX_LOOPS);
        actions->actions_len = 0;
        return -ELOOP;
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
        struct loop_counter *loop;
        int error;

        /* Check whether we've looped too much. */
        loop = &__get_cpu_var(loop_counters);
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                error = loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        OVS_CB(skb)->tun_key = NULL;
        error = do_execute_actions(dp, skb, acts->actions,
                                   acts->actions_len, false);

        /* Check whether sub-actions looped too much. */
        if (unlikely(loop->looping))
                error = loop_suppress(dp, acts);

out_loop:
        /* Decrement loop counter. */
        if (!--loop->count)
                loop->looping = false;

        return error;
}