/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/checksum.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "openvswitch/datapath-protocol.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              const struct nlattr *attr, int len, bool keep_skb);

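/*
 * Make the first 'write_len' bytes of 'skb' safe to modify: if the skb is
 * a clone whose header cannot be edited in place, copy the data first
 * (copy-on-write), so callers can rewrite headers without corrupting the
 * clone's sibling.
 */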
static int make_writable(struct sk_buff *skb, int write_len)
{
        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;

        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* Remove the VLAN header from the packet and update the csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
        struct ethhdr *eh;
        struct vlan_ethhdr *veth;
        int err;

        err = make_writable(skb, VLAN_ETH_HLEN);
        if (unlikely(err))
                return err;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));

        veth = (struct vlan_ethhdr *) skb->data;
        *current_tci = veth->h_vlan_TCI;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

        eh = (struct ethhdr *)__skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return 0;
}
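/*
 * For illustration, __pop_vlan_tci() rewrites the header as follows:
 *
 *   before: | dest MAC | src MAC | 0x8100 | TCI | type | payload ...
 *   after:            | dest MAC | src MAC | type | payload ...
 *
 * The memmove() slides the two MAC addresses (2 * ETH_ALEN bytes) forward
 * by VLAN_HLEN, and __skb_pull() then discards the stale leading bytes,
 * leaving an untagged Ethernet header.
 */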

static int pop_vlan(struct sk_buff *skb)
{
        __be16 tci;
        int err;

        if (likely(vlan_tx_tag_present(skb))) {
                vlan_set_tci(skb, 0);
        } else {
                if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
                             skb->len < VLAN_ETH_HLEN))
                        return 0;

                err = __pop_vlan_tci(skb, &tci);
                if (err)
                        return err;
        }
        /* Move the next VLAN tag, if any, into the hardware-accelerated tag. */
        if (likely(skb->protocol != htons(ETH_P_8021Q) ||
                   skb->len < VLAN_ETH_HLEN))
                return 0;

        err = __pop_vlan_tci(skb, &tci);
        if (unlikely(err))
                return err;

        __vlan_hwaccel_put_tag(skb, ntohs(tci));
        return 0;
}
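/*
 * Note on double-tagged (QinQ) frames: after the outer tag is cleared or
 * popped above, a second 802.1Q header, if present, is popped as well and
 * promoted into the hardware-accelerated tag slot, so at most one tag
 * remains in-band.
 */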

static int push_vlan(struct sk_buff *skb, __be16 new_tci)
{
        if (unlikely(vlan_tx_tag_present(skb))) {
                u16 current_tag;

                /* Push the current VLAN tag down into the packet. */
                current_tag = vlan_tx_tag_get(skb);

                if (!__vlan_put_tag(skb, current_tag))
                        return -ENOMEM;

                if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));

        }
        __vlan_hwaccel_put_tag(skb, ntohs(new_tci));
        return 0;
}
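/*
 * In push_vlan(), a tag already occupying the hardware-accelerated slot is
 * first written back into the packet by __vlan_put_tag() so that 'new_tci'
 * can take the slot; for OVS_CSUM_COMPLETE skbs, the bytes that just
 * became in-band are folded back into skb->csum, the inverse of the
 * csum_sub() in __pop_vlan_tci().
 */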

static bool is_ip(struct sk_buff *skb)
{
        return (OVS_CB(skb)->flow->key.eth.type == htons(ETH_P_IP) &&
                skb->transport_header > skb->network_header);
}

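/*
 * Return a pointer to the L4 checksum field for TCP or UDP, or NULL when
 * the protocol is neither of those or the transport header is truncated.
 * Callers treat NULL as "leave the L4 checksum alone".
 */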
static __sum16 *get_l4_checksum(struct sk_buff *skb)
{
        u8 nw_proto = OVS_CB(skb)->flow->key.ip.proto;
        int transport_len = skb->len - skb_transport_offset(skb);
        if (nw_proto == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        return &tcp_hdr(skb)->check;
        } else if (nw_proto == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr)))
                        return &udp_hdr(skb)->check;
        }
        return NULL;
}

static int set_nw_addr(struct sk_buff *skb, const struct nlattr *a)
{
        __be32 new_nwaddr = nla_get_be32(a);
        struct iphdr *nh;
        __sum16 *check;
        __be32 *nwaddr;
        int err;

        if (unlikely(!is_ip(skb)))
                return 0;

        err = make_writable(skb, skb_network_offset(skb) +
                                 sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);
        nwaddr = nla_type(a) == OVS_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;

        check = get_l4_checksum(skb);
        if (likely(check))
                inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
        csum_replace4(&nh->check, *nwaddr, new_nwaddr);

        skb_clear_rxhash(skb);

        *nwaddr = new_nwaddr;

        return 0;
}
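/*
 * The final argument to inet_proto_csum_replace4() is 1 because the IP
 * addresses are part of the TCP/UDP pseudo-header, which must be kept
 * consistent even when the device will finish the checksum in hardware;
 * contrast set_tp_port(), which passes 0 since port numbers are not part
 * of the pseudo-header.
 */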

static int set_nw_tos(struct sk_buff *skb, u8 nw_tos)
{
        struct iphdr *nh;
        u8 old, new;
        int err;

        if (unlikely(!is_ip(skb)))
                return 0;

        err = make_writable(skb, skb_network_offset(skb) +
                                 sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        /* Must follow make_writable() since that can move the skb data. */
        nh = ip_hdr(skb);

        /* Set the DSCP bits and preserve the ECN bits. */
        old = nh->tos;
        new = nw_tos | (nh->tos & INET_ECN_MASK);
        csum_replace4(&nh->check, (__force __be32)old,
                      (__force __be32)new);
        nh->tos = new;

        return 0;
}
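/*
 * Worked example: with nh->tos == 0x03 (DSCP 0, ECN CE) and nw_tos ==
 * 0xb8 (the EF code point in the upper six bits), the result is
 * 0xb8 | (0x03 & INET_ECN_MASK) == 0xbb: EF is installed and the
 * congestion mark survives.
 */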

static int set_tp_port(struct sk_buff *skb, const struct nlattr *a)
{
        struct udphdr *th;
        __sum16 *check;
        __be16 *port;
        int err;

        if (unlikely(!is_ip(skb)))
                return 0;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        /* Must follow make_writable() since that can move the skb data. */
        check = get_l4_checksum(skb);
        if (unlikely(!check))
                return 0;

        /*
         * Update port and checksum.
         *
         * This is OK because source and destination port numbers are at the
         * same offsets in both UDP and TCP headers, and get_l4_checksum() only
         * supports those protocols.
         */
        th = udp_hdr(skb);
        port = nla_type(a) == OVS_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
        inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
        *port = nla_get_be16(a);
        skb_clear_rxhash(skb);

        return 0;
}
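/*
 * The shared-offset claim could be verified at build time; a hypothetical
 * sketch (these assertions are not part of this file):
 *
 *        BUILD_BUG_ON(offsetof(struct tcphdr, source) !=
 *                     offsetof(struct udphdr, source));
 *        BUILD_BUG_ON(offsetof(struct tcphdr, dest) !=
 *                     offsetof(struct udphdr, dest));
 *
 * Both port pairs sit at offsets 0 and 2 of their respective headers.
 */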

static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport;

        /* The caller may pass the result of a failed skb_clone(), so a
         * null 'skb' reports -ENOMEM. */
        if (unlikely(!skb))
                return -ENOMEM;

        vport = rcu_dereference(dp->ports[out_port]);
        if (unlikely(!vport)) {
                kfree_skb(skb);
                return -ENODEV;
        }

        vport_send(vport, skb);
        return 0;
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            const struct nlattr *attr)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.key = &OVS_CB(skb)->flow->key;
        upcall.userdata = NULL;
        upcall.pid = 0;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.pid = nla_get_u32(a);
                        break;
                }
        }

        return dp_upcall(dp, skb, &upcall);
}
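/*
 * The Netlink PID for the upcall is taken from the nested
 * OVS_USERSPACE_ATTR_PID attribute of each userspace action rather than
 * from the flow, letting userspace steer different actions' packets to
 * different Netlink sockets.
 */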

static int sample(struct datapath *dp, struct sk_buff *skb,
                  const struct nlattr *attr)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (net_random() >= nla_get_u32(a))
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        return do_execute_actions(dp, skb, nla_data(acts_list),
                                  nla_len(acts_list), true);
}
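/*
 * OVS_SAMPLE_ATTR_PROBABILITY scales linearly across the full u32 range:
 * the nested actions run only when net_random() < probability, so 0 never
 * samples, 0x80000000 samples roughly half of all packets, and UINT_MAX
 * samples nearly every packet.
 */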

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              const struct nlattr *attr, int len, bool keep_skb)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so cloning and then freeing the
         * original skbuff would be wasteful.  The code below is slightly
         * obscure to avoid that: an output action only records its port in
         * 'prev_port', and the clone happens once the next action shows there
         * is more work to do.  With two output actions, for instance, the
         * first port receives a clone and the second consumes the original
         * skb. */
        int prev_port = -1;
        u32 priority = skb->priority;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, a);
                        break;

                case OVS_ACTION_ATTR_SET_TUNNEL:
                        OVS_CB(skb)->tun_id = nla_get_be64(a);
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, nla_get_be16(a));
                        if (unlikely(err)) /* skb already freed */
                                return err;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb);
                        break;

                case OVS_ACTION_ATTR_SET_DL_SRC:
                        err = make_writable(skb, ETH_HLEN);
                        if (likely(!err))
                                memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
                        break;

                case OVS_ACTION_ATTR_SET_DL_DST:
                        err = make_writable(skb, ETH_HLEN);
                        if (likely(!err))
                                memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
                        break;

                case OVS_ACTION_ATTR_SET_NW_SRC:
                case OVS_ACTION_ATTR_SET_NW_DST:
                        err = set_nw_addr(skb, a);
                        break;

                case OVS_ACTION_ATTR_SET_NW_TOS:
                        err = set_nw_tos(skb, nla_get_u8(a));
                        break;

                case OVS_ACTION_ATTR_SET_TP_SRC:
                case OVS_ACTION_ATTR_SET_TP_DST:
                        err = set_tp_port(skb, a);
                        break;

                case OVS_ACTION_ATTR_SET_PRIORITY:
                        skb->priority = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_POP_PRIORITY:
                        skb->priority = priority;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, a);
                        break;

                }
                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1) {
                if (keep_skb)
                        skb = skb_clone(skb, GFP_ATOMIC);

                do_output(dp, skb, prev_port);
        } else if (!keep_skb)
                consume_skb(skb);

        return 0;
}

/* We limit the number of times that we recurse into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop. */
#define MAX_LOOPS 5

struct loop_counter {
        u8 count;               /* Recursion depth on this CPU. */
        bool looping;           /* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);

static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
        if (net_ratelimit())
                pr_warn("%s: flow looped %d times, dropping\n",
                        dp_name(dp), MAX_LOOPS);
        actions->actions_len = 0;
        return -ELOOP;
}
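/*
 * A loop can arise when an output action re-enters the datapath, for
 * example through a patch port joining two bridges: vport_send() then
 * calls back into execute_actions() on the same CPU, incrementing the
 * per-CPU counter each time, until MAX_LOOPS trips and loop_suppress()
 * empties the offending flow's action list.
 */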

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb)
{
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
        struct loop_counter *loop;
        int error;

        /* Check whether we've looped too much. */
        loop = &__get_cpu_var(loop_counters);
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                error = loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        OVS_CB(skb)->tun_id = 0;
        error = do_execute_actions(dp, skb, acts->actions,
                                   acts->actions_len, false);

        /* Check whether sub-actions looped too much. */
        if (unlikely(loop->looping))
                error = loop_suppress(dp, acts);

out_loop:
        /* Decrement the loop counter. */
        if (!--loop->count)
                loop->looping = false;

        return error;
}