/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <asm/bug.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "odp-compat.h"
#include "table.h"
#include "vport-internal_dev.h"

#include "compat.h"


int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and dp_port structures with just
 * dp_mutex.
 */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);

/* We limit the number of times that we pass into dp_process_received_packet()
 * to avoid blowing out the stack in the event that we have a loop. */
struct loop_counter {
	int count;		/* Count. */
	bool looping;		/* Loop detected? */
};

#define DP_MAX_LOOPS 5

/* We use a separate counter for each CPU for both interrupt and non-interrupt
 * context in order to keep the limit deterministic for a given packet. */
struct percpu_loop_counters {
	struct loop_counter counters[2];
};

static DEFINE_PER_CPU(struct percpu_loop_counters, dp_loop_counters);

static int new_dp_port(struct datapath *, struct odp_port *, int port_no);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);

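/* Returns the datapath with index 'dp_idx' with dp->mutex held, or NULL if
 * no such datapath exists.  The caller is responsible for releasing
 * dp->mutex. */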
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(dp->ports[ODPP_LOCAL]->vport);
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

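/* Fills 'skb' with an rtnetlink ifinfomsg (family AF_BRIDGE) describing
 * 'port', used by dp_ifinfo_notify() to announce port changes on
 * RTNLGRP_LINK. */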
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct dp_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port->vport);
	int iflink = vport_get_iflink(port->vport);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return ifindex;

	if (iflink < 0)
		return iflink;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port->vport);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port->vport));
	NLA_PUT_U32(skb, IFLA_MASTER, vport_get_ifindex(dp->ports[ODPP_LOCAL]->vport));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port->vport));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port->vport)
			? vport_get_operstate(port->vport)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN,
		vport_get_addr(port->vport));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct dp_port *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

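/* Creates datapath number 'dp_idx' with a local port named by the
 * user-supplied 'devnamep' (or "of<dp_idx>" if 'devnamep' is null).  Backs
 * the ODP_DP_CREATE ioctl. */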
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct odp_port internal_dev_port;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
		if (retval < 0) {
			err = -EFAULT;
			goto err;
		} else if (retval >= IFNAMSIZ) {
			err = -ENAMETOOLONG;
			goto err;
		}
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(0));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
	strcpy(internal_dev_port.devname, devname);
	internal_dev_port.flags = ODP_PORT_INTERNAL;
	err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
	if (err) {
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_table;
	}

	dp->drop_frags = 0;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	dp_sysfs_add_dp(dp);

	return 0;

err_destroy_local_port:
	dp_detach_port(dp->ports[ODPP_LOCAL], 1);
err_destroy_table:
	tbl_destroy(dp->table, NULL);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}

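/* Tears down 'dp': detaches all of its ports, unpublishes it from the 'dps'
 * array, and frees its flow table, message queues, and per-CPU statistics.
 * Called with RTNL lock and dp_mutex. */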
static void do_destroy_dp(struct datapath *dp)
{
	struct dp_port *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p, 1);

	dp_sysfs_del_dp(dp);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_detach_port(dp->ports[ODPP_LOCAL], 1);

	tbl_destroy(dp->table, flow_free_tbl);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}

static void release_dp_port(struct kobject *kobj)
{
	struct dp_port *p = container_of(kobj, struct dp_port, kobj);
	kfree(p);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_dp_port
};

/* Called with RTNL lock and dp_mutex. */
static int new_dp_port(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
	struct vport *vport;
	struct dp_port *p;
	int err;

	vport = vport_locate(odp_port->devname);
	if (!vport) {
		vport_lock();

		if (odp_port->flags & ODP_PORT_INTERNAL)
			vport = vport_add(odp_port->devname, "internal", NULL);
		else
			vport = vport_add(odp_port->devname, "netdev", NULL);

		vport_unlock();

		if (IS_ERR(vport))
			return PTR_ERR(vport);
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->port_no = port_no;
	p->dp = dp;
	p->vport = vport;
	atomic_set(&p->sflow_pool, 0);

	err = vport_attach(vport, p);
	if (err) {
		kfree(p);
		return err;
	}

	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);
	dp->n_ports++;

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	p->kobj.kset = NULL;
	kobject_init(&p->kobj, &brport_ktype);

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}

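/* Backs the ODP_PORT_ATTACH ioctl: copies an odp_port description in from
 * userspace, picks the lowest free port number on datapath 'dp_idx', attaches
 * the named device there, and writes the chosen port number back to
 * userspace. */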
static int attach_port(int dp_idx, struct odp_port __user *portp)
{
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	err = new_dp_port(dp, &port, port_no);
	if (err)
		goto out_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = put_user(port_no, &portp->port);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

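/* Detaches port 'p' from its datapath and destroys it, after an RCU grace
 * period so that no packet processing still references it.  If 'may_delete'
 * is nonzero and the underlying vport is of type "netdev" or "internal", the
 * vport itself is deleted as well.  Must be called with RTNL lock. */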
int dp_detach_port(struct dp_port *p, int may_delete)
{
	struct vport *vport = p->vport;
	int err;

	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	p->dp->n_ports--;
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	err = vport_detach(vport);
	if (err)
		return err;

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (may_delete) {
		const char *port_type = vport_get_type(vport);

		if (!strcmp(port_type, "netdev") || !strcmp(port_type, "internal")) {
			vport_lock();
			vport_del(vport);
			vport_unlock();
		}
	}

	kobject_put(&p->kobj);

	return 0;
}

static int detach_port(int dp_idx, int port_no)
{
	struct dp_port *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_detach_port(p, 1);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

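/* Called when the per-CPU loop counter exceeds DP_MAX_LOOPS: emits a
 * rate-limited warning and truncates the flow's action list so that the
 * looping packet is dropped rather than forwarded again. */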
static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			dp_name(dp), DP_MAX_LOOPS);
	actions->n_actions = 0;
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->dp_port = p;

	if (!OVS_CB(skb)->flow) {
		struct odp_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
	if (unlikely(++loop->count > DP_MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		suppress_loop(dp, acts);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->n_actions);
	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		suppress_loop(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	put_cpu_var(dp_loop_counters);

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* This code is based on skb_checksum_setup() from Xen's net/dev/core.c.  We
 * can't call this function directly because it isn't exported in all
 * versions. */
int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	__u16 csum_start, csum_offset;

	if (!skb->proto_csum_blank)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data))
		goto out;

	iph = ip_hdr(skb);
	th = skb_network_header(skb) + 4 * iph->ihl;

	csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			pr_err("Attempting to checksum a non-TCP/UDP packet, "
			       "dropping a protocol %d packet",
			       iph->protocol);
		goto out;
	}

	if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data))
		goto out;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->proto_csum_blank = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->csum_start = csum_start;
	skb->csum_offset = csum_offset;
#else
	skb_set_transport_header(skb, csum_start - skb_headroom(skb));
	skb->csum = csum_offset;
#endif

	err = 0;

out:
	return err;
}
#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */

/* Types of checksums that we can receive (these all refer to L4 checksums):
 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
 *	(though not verified) checksum in packet but not in skb->csum.  Packets
 *	from the bridge local port will also have this type.
 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
 *	also the GRE module.  This is the same as CHECKSUM_NONE, except it has
 *	a valid skb->csum.  Importantly, both contain a full checksum (not
 *	verified) in the packet itself.  The only difference is that if the
 *	packet gets to L4 processing on this machine (not in DomU) we won't
 *	have to recompute the checksum to verify.  Most hardware devices do not
 *	produce packets with this type, even if they support receive checksum
 *	offloading (they produce type #5).
 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to
 *	be computed if it is sent off box.  Unfortunately on earlier kernels,
 *	this case is impossible to distinguish from #2, despite having opposite
 *	meanings.  Xen adds an extra field on earlier kernels (see #4) in order
 *	to distinguish the different states.
 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
 *	generated locally by a Xen DomU and has a partial checksum.  If it is
 *	handled on this machine (Dom0 or DomU), then the checksum will not be
 *	computed.  If it goes off box, the checksum in the packet needs to be
 *	completed.  Calling skb_checksum_setup converts this to CHECKSUM_HW
 *	(CHECKSUM_PARTIAL) so that the checksum can be completed.  In later
 *	kernels, this combination is replaced with CHECKSUM_PARTIAL.
 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
 *	full checksum or using a protocol without a checksum.  skb->csum is
 *	undefined.  This is common from devices with receive checksum
 *	offloading.  This is somewhat similar to CHECKSUM_NONE, except that
 *	nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
 *
 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
 * both defined as CHECKSUM_HW.  Normally the meaning of CHECKSUM_HW is clear
 * based on whether it is on the transmit or receive path.  After the datapath
 * it will be interpreted as CHECKSUM_PARTIAL.  If the packet already has a
 * checksum, we will panic.  Since we can receive packets with checksums, we
 * assume that all CHECKSUM_HW packets have checksums and map them to
 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
 * packet is processed by the local IP stack, in which case it will need to
 * be reverified).  If we receive a packet with CHECKSUM_HW that really means
 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum.  However, there
 * shouldn't be any devices that do this with bridging. */
void compute_ip_summed(struct sk_buff *skb, bool xmit)
{
	/* For our convenience these defines change repeatedly between kernel
	 * versions, so we can't just copy them over... */
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
		break;
	case CHECKSUM_UNNECESSARY:
		OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
		break;
#ifdef CHECKSUM_HW
	/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
	 * However, on the receive side we should only get CHECKSUM_PARTIAL
	 * packets from Xen, which uses some special fields to represent this
	 * (see below).  Since we can only make one type work, pick the one
	 * that actually happens in practice.
	 *
	 * On the transmit side (basically after skb_checksum_setup()
	 * has been run or on internal dev transmit), packets with
	 * CHECKSUM_COMPLETE aren't generated, so assume CHECKSUM_PARTIAL. */
	case CHECKSUM_HW:
		if (!xmit)
			OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		else
			OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;

		break;
#else
	case CHECKSUM_COMPLETE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		break;
	case CHECKSUM_PARTIAL:
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;
#endif
	default:
		pr_err("unknown checksum type %d\n", skb->ip_summed);
		/* None seems the safest... */
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
	}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
	/* Xen has a special way of representing CHECKSUM_PARTIAL on older
	 * kernels.  It should not be set on the transmit path though. */
	if (skb->proto_csum_blank)
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;

	WARN_ON_ONCE(skb->proto_csum_blank && xmit);
#endif
}

/* This function closely resembles skb_forward_csum() used by the bridge.  It
 * is slightly different because we are only concerned with bridging and not
 * other types of forwarding and can get away with slightly more optimal
 * behavior. */
void forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
	if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
#endif
}

/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
				 int queue_no, u32 arg)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->dp_port)
		port_no = OVS_CB(skb)->dp_port->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_msg *header;

		nskb = skb->next;
		skb->next = NULL;

		err = skb_cow(skb, sizeof *header);
		if (err)
			goto err_kfree_skbs;

		header = (struct odp_msg*)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}

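/* Queues 'skb' for delivery to userspace on queue 'queue_no' (one of
 * _ODPL_MISS_NR, _ODPL_ACTION_NR, _ODPL_SFLOW_NR), first splitting GSO
 * packets into their segments.  If the queue is already full the packet is
 * dropped and the per-CPU n_lost counter is bumped. */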
int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		      u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
		if (nskb) {
			kfree_skb(skb);
			skb = nskb;
			if (unlikely(IS_ERR(skb))) {
				err = PTR_ERR(skb);
				goto err;
			}
		} else {
			/* XXX This case might not be possible.  It's hard to
			 * tell from the skb_gso_segment() code and comment. */
		}
	}

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}

static int flush_flows(struct datapath *dp)
{
	struct tbl *old_table = rcu_dereference(dp->table);
	struct tbl *new_table;

	new_table = tbl_create(0);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	return 0;
}

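/* Verifies that every action in 'actions' (typically copied in from
 * userspace) is of a known type with arguments in range.  Returns 0 if so,
 * a negative errno value otherwise. */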
static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];
		__be16 mask;

		switch (a->type) {
		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			mask = a->dl_tci.mask;
			if (mask != htons(VLAN_VID_MASK) &&
			    mask != htons(VLAN_PCP_MASK) &&
			    mask != htons(VLAN_VID_MASK | VLAN_PCP_MASK))
				return -EINVAL;
			if (a->dl_tci.tci & ~mask)
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (a->nw_tos.nw_tos & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

953static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
954{
955 struct sw_flow_actions *actions;
956 int error;
957
958 actions = flow_actions_alloc(flow->n_actions);
959 error = PTR_ERR(actions);
960 if (IS_ERR(actions))
961 goto error;
962
963 error = -EFAULT;
964 if (copy_from_user(actions->actions, flow->actions,
965 flow->n_actions * sizeof(union odp_action)))
966 goto error_free_actions;
967 error = validate_actions(actions);
968 if (error)
969 goto error_free_actions;
970
971 return actions;
972
973error_free_actions:
974 kfree(actions);
975error:
976 return ERR_PTR(error);
977}
978
6bfafa55 979static struct timespec get_time_offset(void)
064af421 980{
981 struct timespec now_mono, now_jiffies;
982
983 ktime_get_ts(&now_mono);
984 jiffies_to_timespec(jiffies, &now_jiffies);
985 return timespec_sub(now_mono, now_jiffies);
986}
987
988static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats,
989 struct timespec time_offset)
990{
991 if (flow->used) {
992 struct timespec flow_ts, used;
993
994 jiffies_to_timespec(flow->used, &flow_ts);
995 set_normalized_timespec(&used, flow_ts.tv_sec + time_offset.tv_sec,
996 flow_ts.tv_nsec + time_offset.tv_nsec);
997
998 stats->used_sec = used.tv_sec;
999 stats->used_nsec = used.tv_nsec;
1000 } else {
1001 stats->used_sec = 0;
1002 stats->used_nsec = 0;
1003 }
6bfafa55 1004
1005 stats->n_packets = flow->packet_count;
1006 stats->n_bytes = flow->byte_count;
abfec865 1007 stats->reserved = 0;
064af421 1008 stats->tcp_flags = flow->tcp_flags;
f1aa2072 1009 stats->error = 0;
064af421
BP
1010}
1011
1012static void clear_stats(struct sw_flow *flow)
1013{
6bfafa55 1014 flow->used = 0;
064af421 1015 flow->tcp_flags = 0;
1016 flow->packet_count = 0;
1017 flow->byte_count = 0;
1018}
1019
1020static int expand_table(struct datapath *dp)
1021{
1022 struct tbl *old_table = rcu_dereference(dp->table);
1023 struct tbl *new_table;
1024
1025 new_table = tbl_expand(old_table);
1026 if (IS_ERR(new_table))
1027 return PTR_ERR(new_table);
1028
1029 rcu_assign_pointer(dp->table, new_table);
1030 tbl_deferred_destroy(old_table, NULL);
1031
1032 return 0;
1033}
1034
1035static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
1036 struct odp_flow_stats *stats)
064af421 1037{
8d5ebd83 1038 struct tbl_node *flow_node;
6fa58f7a 1039 struct sw_flow *flow;
8d5ebd83 1040 struct tbl *table;
064af421
BP
1041 int error;
1042
064af421 1043 table = rcu_dereference(dp->table);
44e05eca 1044 flow_node = tbl_lookup(table, &uf->flow.key, flow_hash(&uf->flow.key), flow_cmp);
8d5ebd83 1045 if (!flow_node) {
6fa58f7a 1046 /* No such flow. */
1047 struct sw_flow_actions *acts;
1048
1049 error = -ENOENT;
44e05eca 1050 if (!(uf->flags & ODPPF_CREATE))
064af421
BP
1051 goto error;
1052
1053 /* Expand table, if necessary, to make room. */
8d5ebd83
JG
1054 if (tbl_count(table) >= tbl_n_buckets(table)) {
1055 error = expand_table(dp);
064af421
BP
1056 if (error)
1057 goto error;
6fa58f7a 1058 table = rcu_dereference(dp->table);
064af421
BP
1059 }
1060
1061 /* Allocate flow. */
560e8022
JG
1062 flow = flow_alloc();
1063 if (IS_ERR(flow)) {
1064 error = PTR_ERR(flow);
064af421 1065 goto error;
560e8022 1066 }
44e05eca 1067 flow->key = uf->flow.key;
064af421
BP
1068 clear_stats(flow);
1069
1070 /* Obtain actions. */
44e05eca 1071 acts = get_actions(&uf->flow);
064af421
BP
1072 error = PTR_ERR(acts);
1073 if (IS_ERR(acts))
1074 goto error_free_flow;
1075 rcu_assign_pointer(flow->sf_acts, acts);
1076
1077 /* Put flow in bucket. */
8d5ebd83 1078 error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
6fa58f7a
BP
1079 if (error)
1080 goto error_free_flow_acts;
8d5ebd83 1081
44e05eca 1082 memset(stats, 0, sizeof(struct odp_flow_stats));
064af421
BP
1083 } else {
1084 /* We found a matching flow. */
064af421 1085 struct sw_flow_actions *old_acts, *new_acts;
064af421 1086
8d5ebd83
JG
1087 flow = flow_cast(flow_node);
1088
064af421
BP
1089 /* Bail out if we're not allowed to modify an existing flow. */
1090 error = -EEXIST;
44e05eca 1091 if (!(uf->flags & ODPPF_MODIFY))
064af421
BP
1092 goto error;
1093
1094 /* Swap actions. */
44e05eca 1095 new_acts = get_actions(&uf->flow);
064af421
BP
1096 error = PTR_ERR(new_acts);
1097 if (IS_ERR(new_acts))
1098 goto error;
1099 old_acts = rcu_dereference(flow->sf_acts);
1100 if (old_acts->n_actions != new_acts->n_actions ||
1101 memcmp(old_acts->actions, new_acts->actions,
1102 sizeof(union odp_action) * old_acts->n_actions)) {
1103 rcu_assign_pointer(flow->sf_acts, new_acts);
1104 flow_deferred_free_acts(old_acts);
1105 } else {
1106 kfree(new_acts);
1107 }
1108
1109 /* Fetch stats, then clear them if necessary. */
1d7241c7 1110 spin_lock_bh(&flow->lock);
6bfafa55 1111 get_stats(flow, stats, get_time_offset());
44e05eca 1112 if (uf->flags & ODPPF_ZERO_STATS)
064af421 1113 clear_stats(flow);
1d7241c7 1114 spin_unlock_bh(&flow->lock);
064af421
BP
1115 }
1116
064af421
BP
1117 return 0;
1118
6fa58f7a
BP
1119error_free_flow_acts:
1120 kfree(flow->sf_acts);
064af421 1121error_free_flow:
fb8c9347
JG
1122 flow->sf_acts = NULL;
1123 flow_put(flow);
064af421
BP
1124error:
1125 return error;
1126}
1127
44e05eca
BP
1128static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
1129{
1130 struct odp_flow_stats stats;
1131 struct odp_flow_put uf;
1132 int error;
1133
1134 if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
1135 return -EFAULT;
1136
1137 error = do_put_flow(dp, &uf, &stats);
1138 if (error)
1139 return error;
1140
776f10ce
BP
1141 if (copy_to_user(&ufp->flow.stats, &stats,
1142 sizeof(struct odp_flow_stats)))
44e05eca
BP
1143 return -EFAULT;
1144
1145 return 0;
1146}
1147
1148static int do_answer_query(struct sw_flow *flow, u32 query_flags,
6bfafa55 1149 struct timespec time_offset,
44e05eca
BP
1150 struct odp_flow_stats __user *ustats,
1151 union odp_action __user *actions,
1152 u32 __user *n_actionsp)
064af421 1153{
064af421 1154 struct sw_flow_actions *sf_acts;
44e05eca 1155 struct odp_flow_stats stats;
064af421
BP
1156 u32 n_actions;
1157
1d7241c7 1158 spin_lock_bh(&flow->lock);
6bfafa55 1159 get_stats(flow, &stats, time_offset);
1d7241c7 1160 if (query_flags & ODPFF_ZERO_TCP_FLAGS)
44e05eca 1161 flow->tcp_flags = 0;
1d7241c7
JG
1162
1163 spin_unlock_bh(&flow->lock);
44e05eca 1164
776f10ce
BP
1165 if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
1166 get_user(n_actions, n_actionsp))
064af421
BP
1167 return -EFAULT;
1168
1169 if (!n_actions)
1170 return 0;
064af421
BP
1171
1172 sf_acts = rcu_dereference(flow->sf_acts);
776f10ce 1173 if (put_user(sf_acts->n_actions, n_actionsp) ||
064af421
BP
1174 (actions && copy_to_user(actions, sf_acts->actions,
1175 sizeof(union odp_action) *
1176 min(sf_acts->n_actions, n_actions))))
1177 return -EFAULT;
1178
1179 return 0;
1180}
1181
18fdbe16 1182static int answer_query(struct sw_flow *flow, u32 query_flags,
6bfafa55 1183 struct timespec time_offset,
18fdbe16 1184 struct odp_flow __user *ufp)
064af421 1185{
44e05eca 1186 union odp_action *actions;
064af421 1187
776f10ce 1188 if (get_user(actions, &ufp->actions))
064af421 1189 return -EFAULT;
44e05eca 1190
6bfafa55 1191 return do_answer_query(flow, query_flags, time_offset,
44e05eca 1192 &ufp->stats, actions, &ufp->n_actions);
064af421
BP
1193}
1194
44e05eca 1195static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
064af421 1196{
8d5ebd83 1197 struct tbl *table = rcu_dereference(dp->table);
8d5ebd83 1198 struct tbl_node *flow_node;
064af421
BP
1199 int error;
1200
44e05eca 1201 flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
8d5ebd83 1202 if (!flow_node)
44e05eca 1203 return ERR_PTR(-ENOENT);
064af421 1204
8d5ebd83 1205 error = tbl_remove(table, flow_node);
f1aa2072 1206 if (error)
44e05eca 1207 return ERR_PTR(error);
064af421 1208
44e05eca
BP
1209 /* XXX Returned flow_node's statistics might lose a few packets, since
1210 * other CPUs can be using this flow. We used to synchronize_rcu() to
1211 * make sure that we get completely accurate stats, but that blows our
1212 * performance, badly. */
1213 return flow_cast(flow_node);
1214}
1215
1216static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
1217{
1218 struct sw_flow *flow;
1219 struct odp_flow uf;
1220 int error;
1221
1222 if (copy_from_user(&uf, ufp, sizeof uf))
1223 return -EFAULT;
1224
1225 flow = do_del_flow(dp, &uf.key);
1226 if (IS_ERR(flow))
1227 return PTR_ERR(flow);
8d5ebd83 1228
6bfafa55 1229 error = answer_query(flow, 0, get_time_offset(), ufp);
f1aa2072 1230 flow_deferred_free(flow);
064af421
BP
1231 return error;
1232}
1233
44e05eca 1234static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
064af421 1235{
8d5ebd83 1236 struct tbl *table = rcu_dereference(dp->table);
6bfafa55 1237 struct timespec time_offset;
6d7568dc
BP
1238 u32 i;
1239
6bfafa55
JG
1240 time_offset = get_time_offset();
1241
064af421 1242 for (i = 0; i < flowvec->n_flows; i++) {
44e05eca 1243 struct odp_flow __user *ufp = &flowvec->flows[i];
064af421 1244 struct odp_flow uf;
8d5ebd83 1245 struct tbl_node *flow_node;
064af421
BP
1246 int error;
1247
776f10ce 1248 if (copy_from_user(&uf, ufp, sizeof uf))
064af421 1249 return -EFAULT;
064af421 1250
8d5ebd83
JG
1251 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1252 if (!flow_node)
776f10ce 1253 error = put_user(ENOENT, &ufp->stats.error);
064af421 1254 else
6bfafa55 1255 error = answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp);
064af421
BP
1256 if (error)
1257 return -EFAULT;
1258 }
1259 return flowvec->n_flows;
1260}
1261
1262struct list_flows_cbdata {
1263 struct odp_flow __user *uflows;
6d7568dc
BP
1264 u32 n_flows;
1265 u32 listed_flows;
6bfafa55 1266 struct timespec time_offset;
064af421
BP
1267};
1268
8d5ebd83 1269static int list_flow(struct tbl_node *node, void *cbdata_)
064af421 1270{
8d5ebd83 1271 struct sw_flow *flow = flow_cast(node);
064af421
BP
1272 struct list_flows_cbdata *cbdata = cbdata_;
1273 struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1274 int error;
1275
776f10ce 1276 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
064af421 1277 return -EFAULT;
6bfafa55 1278 error = answer_query(flow, 0, cbdata->time_offset, ufp);
064af421
BP
1279 if (error)
1280 return error;
1281
1282 if (cbdata->listed_flows >= cbdata->n_flows)
1283 return cbdata->listed_flows;
1284 return 0;
1285}
1286
44e05eca 1287static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
064af421
BP
1288{
1289 struct list_flows_cbdata cbdata;
1290 int error;
1291
1292 if (!flowvec->n_flows)
1293 return 0;
1294
1295 cbdata.uflows = flowvec->flows;
1296 cbdata.n_flows = flowvec->n_flows;
1297 cbdata.listed_flows = 0;
6bfafa55
JG
1298 cbdata.time_offset = get_time_offset();
1299
8d5ebd83 1300 error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
064af421
BP
1301 return error ? error : cbdata.listed_flows;
1302}
1303
1304static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1305 int (*function)(struct datapath *,
1306 const struct odp_flowvec *))
1307{
1308 struct odp_flowvec __user *uflowvec;
1309 struct odp_flowvec flowvec;
1310 int retval;
1311
1312 uflowvec = (struct odp_flowvec __user *)argp;
776f10ce 1313 if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
064af421
BP
1314 return -EFAULT;
1315
1316 if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
1317 return -EINVAL;
1318
064af421
BP
1319 retval = function(dp, &flowvec);
1320 return (retval < 0 ? retval
1321 : retval == flowvec.n_flows ? 0
776f10ce 1322 : put_user(retval, &uflowvec->n_flows));
064af421
BP
1323}
1324
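/* Backs the ODP_EXECUTE ioctl: applies a userspace-supplied action list to a
 * userspace-supplied packet without installing a flow.  Roughly, the caller
 * fills in a struct odp_execute with 'data'/'length' pointing at an Ethernet
 * frame and 'actions'/'n_actions' describing what to do with it; see
 * execute_packet() below and compat_execute() for the 32-bit entry point. */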
static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->n_actions);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute->actions,
			   execute->n_actions * sizeof *execute->actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length), execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->n_actions);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
{
	struct odp_execute execute;

	if (copy_from_user(&execute, executep, sizeof execute))
		return -EFAULT;

	return do_execute(dp, &execute);
}

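/* Backs the ODP_DP_STATS ioctl: fills an odp_stats structure with flow table
 * occupancy, port counts, and hit/miss/lost counters accumulated across CPUs
 * (read consistently under each CPU's seqlock), then copies it to
 * userspace. */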
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct tbl *table = rcu_dereference(dp->table);
	struct odp_stats stats;
	int i;

	stats.n_flows = tbl_count(table);
	stats.cur_capacity = tbl_n_buckets(table);
	stats.max_capacity = TBL_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats.n_frags += local_stats.n_frags;
		stats.n_hit += local_stats.n_hit;
		stats.n_missed += local_stats.n_missed;
		stats.n_lost += local_stats.n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}
1436
1dcf111b
JP
1437/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
1438int dp_min_mtu(const struct datapath *dp)
1439{
f2459fe7 1440 struct dp_port *p;
1dcf111b
JP
1441 int mtu = 0;
1442
1443 ASSERT_RTNL();
1444
1445 list_for_each_entry_rcu (p, &dp->port_list, node) {
f2459fe7 1446 int dev_mtu;
1dcf111b
JP
1447
1448 /* Skip any internal ports, since that's what we're trying to
1449 * set. */
f2459fe7 1450 if (is_internal_vport(p->vport))
1dcf111b
JP
1451 continue;
1452
f2459fe7
JG
1453 dev_mtu = vport_get_mtu(p->vport);
1454 if (!mtu || dev_mtu < mtu)
1455 mtu = dev_mtu;
1dcf111b
JP
1456 }
1457
1458 return mtu ? mtu : ETH_DATA_LEN;
1459}
1460
f2459fe7 1461/* Sets the MTU of all datapath devices to the minimum of the ports. Must
d8b5d43a 1462 * be called with RTNL lock. */
f2459fe7 1463void set_internal_devs_mtu(const struct datapath *dp)
a7786963 1464{
f2459fe7 1465 struct dp_port *p;
a7786963
JG
1466 int mtu;
1467
1468 ASSERT_RTNL();
1469
a7786963
JG
1470 mtu = dp_min_mtu(dp);
1471
1472 list_for_each_entry_rcu (p, &dp->port_list, node) {
f2459fe7
JG
1473 if (is_internal_vport(p->vport))
1474 vport_set_mtu(p->vport, mtu);
a7786963
JG
1475 }
1476}
1477
fceb2a5b 1478static int put_port(const struct dp_port *p, struct odp_port __user *uop)
064af421
BP
1479{
1480 struct odp_port op;
f2459fe7 1481
064af421 1482 memset(&op, 0, sizeof op);
f2459fe7
JG
1483
1484 rcu_read_lock();
1485 strncpy(op.devname, vport_get_name(p->vport), sizeof op.devname);
1486 rcu_read_unlock();
1487
064af421 1488 op.port = p->port_no;
f2459fe7
JG
1489 op.flags = is_internal_vport(p->vport) ? ODP_PORT_INTERNAL : 0;
1490
064af421
BP
1491 return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
1492}
1493
fceb2a5b 1494static int query_port(struct datapath *dp, struct odp_port __user *uport)
064af421
BP
1495{
1496 struct odp_port port;
1497
1498 if (copy_from_user(&port, uport, sizeof port))
1499 return -EFAULT;
f2459fe7 1500
064af421 1501 if (port.devname[0]) {
f2459fe7
JG
1502 struct vport *vport;
1503 struct dp_port *dp_port;
1504 int err = 0;
064af421
BP
1505
1506 port.devname[IFNAMSIZ - 1] = '\0';
1507
f2459fe7
JG
1508 vport_lock();
1509 rcu_read_lock();
064af421 1510
f2459fe7
JG
1511 vport = vport_locate(port.devname);
1512 if (!vport) {
1513 err = -ENODEV;
1514 goto error_unlock;
064af421 1515 }
064af421 1516
f2459fe7
JG
1517 dp_port = vport_get_dp_port(vport);
1518 if (!dp_port || dp_port->dp != dp) {
1519 err = -ENOENT;
1520 goto error_unlock;
1521 }
1522
1523 port.port = dp_port->port_no;
1524
1525error_unlock:
1526 rcu_read_unlock();
1527 vport_unlock();
1528
1529 if (err)
1530 return err;
064af421
BP
1531 } else {
1532 if (port.port >= DP_MAX_PORTS)
1533 return -EINVAL;
1534 if (!dp->ports[port.port])
1535 return -ENOENT;
064af421 1536 }
f2459fe7
JG
1537
1538 return put_port(dp->ports[port.port], uport);
064af421
BP
1539}
1540
fceb2a5b
JG
1541static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
1542 int n_ports)
064af421 1543{
44e05eca
BP
1544 int idx = 0;
1545 if (n_ports) {
1546 struct dp_port *p;
064af421 1547
064af421 1548 list_for_each_entry_rcu (p, &dp->port_list, node) {
44e05eca 1549 if (put_port(p, &uports[idx]))
064af421 1550 return -EFAULT;
44e05eca 1551 if (idx++ >= n_ports)
064af421
BP
1552 break;
1553 }
1554 }
44e05eca
BP
1555 return idx;
1556}
1557
fceb2a5b 1558static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
44e05eca
BP
1559{
1560 struct odp_portvec pv;
1561 int retval;
1562
1563 if (copy_from_user(&pv, upv, sizeof pv))
1564 return -EFAULT;
1565
1566 retval = do_list_ports(dp, pv.ports, pv.n_ports);
1567 if (retval < 0)
1568 return retval;
1569
1570 return put_user(retval, &upv->n_ports);
064af421
BP
1571}
1572
7c40efc9
BP
1573static int get_listen_mask(const struct file *f)
1574{
1575 return (long)f->private_data;
1576}
1577
1578static void set_listen_mask(struct file *f, int listen_mask)
1579{
1580 f->private_data = (void*)(long)listen_mask;
1581}
1582
064af421
BP
1583static long openvswitch_ioctl(struct file *f, unsigned int cmd,
1584 unsigned long argp)
1585{
1586 int dp_idx = iminor(f->f_dentry->d_inode);
1587 struct datapath *dp;
1588 int drop_frags, listeners, port_no;
72b06300 1589 unsigned int sflow_probability;
064af421
BP
1590 int err;
1591
1592 /* Handle commands with special locking requirements up front. */
1593 switch (cmd) {
1594 case ODP_DP_CREATE:
e86c8696
BP
1595 err = create_dp(dp_idx, (char __user *)argp);
1596 goto exit;
064af421
BP
1597
1598 case ODP_DP_DESTROY:
e86c8696
BP
1599 err = destroy_dp(dp_idx);
1600 goto exit;
064af421 1601
f2459fe7
JG
1602 case ODP_PORT_ATTACH:
1603 err = attach_port(dp_idx, (struct odp_port __user *)argp);
e86c8696 1604 goto exit;
064af421 1605
f2459fe7 1606 case ODP_PORT_DETACH:
064af421 1607 err = get_user(port_no, (int __user *)argp);
e86c8696 1608 if (!err)
f2459fe7
JG
1609 err = detach_port(dp_idx, port_no);
1610 goto exit;
1611
1612 case ODP_VPORT_ADD:
61e89cd6 1613 err = vport_user_add((struct odp_vport_add __user *)argp);
f2459fe7
JG
1614 goto exit;
1615
1616 case ODP_VPORT_MOD:
61e89cd6 1617 err = vport_user_mod((struct odp_vport_mod __user *)argp);
f2459fe7
JG
1618 goto exit;
1619
1620 case ODP_VPORT_DEL:
61e89cd6 1621 err = vport_user_del((char __user *)argp);
f2459fe7
JG
1622 goto exit;
1623
1624 case ODP_VPORT_STATS_GET:
61e89cd6 1625 err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
f2459fe7
JG
1626 goto exit;
1627
780e6207
JG
1628 case ODP_VPORT_STATS_SET:
1629 err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
1630 goto exit;
1631
f2459fe7 1632 case ODP_VPORT_ETHER_GET:
61e89cd6 1633 err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
f2459fe7
JG
1634 goto exit;
1635
1636 case ODP_VPORT_ETHER_SET:
61e89cd6 1637 err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
f2459fe7
JG
1638 goto exit;
1639
1640 case ODP_VPORT_MTU_GET:
61e89cd6 1641 err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
f2459fe7
JG
1642 goto exit;
1643
1644 case ODP_VPORT_MTU_SET:
61e89cd6 1645 err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
e86c8696 1646 goto exit;
064af421
BP
1647 }
1648
1649 dp = get_dp_locked(dp_idx);
e86c8696 1650 err = -ENODEV;
064af421 1651 if (!dp)
e86c8696 1652 goto exit;
064af421
BP
1653
1654 switch (cmd) {
1655 case ODP_DP_STATS:
1656 err = get_dp_stats(dp, (struct odp_stats __user *)argp);
1657 break;
1658
1659 case ODP_GET_DROP_FRAGS:
1660 err = put_user(dp->drop_frags, (int __user *)argp);
1661 break;
1662
1663 case ODP_SET_DROP_FRAGS:
1664 err = get_user(drop_frags, (int __user *)argp);
1665 if (err)
1666 break;
1667 err = -EINVAL;
1668 if (drop_frags != 0 && drop_frags != 1)
1669 break;
1670 dp->drop_frags = drop_frags;
1671 err = 0;
1672 break;
1673
1674 case ODP_GET_LISTEN_MASK:
7c40efc9 1675 err = put_user(get_listen_mask(f), (int __user *)argp);
064af421
BP
1676 break;
1677
1678 case ODP_SET_LISTEN_MASK:
1679 err = get_user(listeners, (int __user *)argp);
1680 if (err)
1681 break;
1682 err = -EINVAL;
1683 if (listeners & ~ODPL_ALL)
1684 break;
1685 err = 0;
7c40efc9 1686 set_listen_mask(f, listeners);
064af421
BP
1687 break;
1688
72b06300
BP
1689 case ODP_GET_SFLOW_PROBABILITY:
1690 err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
1691 break;
1692
1693 case ODP_SET_SFLOW_PROBABILITY:
1694 err = get_user(sflow_probability, (unsigned int __user *)argp);
1695 if (!err)
1696 dp->sflow_probability = sflow_probability;
1697 break;
1698
064af421
BP
1699 case ODP_PORT_QUERY:
1700 err = query_port(dp, (struct odp_port __user *)argp);
1701 break;
1702
1703 case ODP_PORT_LIST:
1704 err = list_ports(dp, (struct odp_portvec __user *)argp);
1705 break;
1706
064af421
BP
1707 case ODP_FLOW_FLUSH:
1708 err = flush_flows(dp);
1709 break;
1710
1711 case ODP_FLOW_PUT:
1712 err = put_flow(dp, (struct odp_flow_put __user *)argp);
1713 break;
1714
1715 case ODP_FLOW_DEL:
f1aa2072 1716 err = del_flow(dp, (struct odp_flow __user *)argp);
064af421
BP
1717 break;
1718
f1aa2072 1719 case ODP_FLOW_GET:
44e05eca 1720 err = do_flowvec_ioctl(dp, argp, do_query_flows);
064af421
BP
1721 break;
1722
1723 case ODP_FLOW_LIST:
44e05eca 1724 err = do_flowvec_ioctl(dp, argp, do_list_flows);
064af421
BP
1725 break;
1726
1727 case ODP_EXECUTE:
44e05eca 1728 err = execute_packet(dp, (struct odp_execute __user *)argp);
064af421
BP
1729 break;
1730
1731 default:
1732 err = -ENOIOCTLCMD;
1733 break;
1734 }
1735 mutex_unlock(&dp->mutex);
e86c8696 1736exit:
064af421
BP
1737 return err;
1738}
1739
1740static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
1741{
1742 int i;
1743 for (i = 0; i < DP_N_QUEUES; i++) {
1744 if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
1745 return 1;
1746 }
1747 return 0;
1748}
1749
3fbd517a
BP
1750#ifdef CONFIG_COMPAT
1751static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
1752{
1753 struct compat_odp_portvec pv;
1754 int retval;
1755
1756 if (copy_from_user(&pv, upv, sizeof pv))
1757 return -EFAULT;
1758
1759 retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
1760 if (retval < 0)
1761 return retval;
1762
1763 return put_user(retval, &upv->n_ports);
1764}
1765
3fbd517a
BP
1766static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
1767{
1768 compat_uptr_t actions;
1769
1770 if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
1771 __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
1772 __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
1773 __get_user(actions, &compat->actions) ||
1774 __get_user(flow->n_actions, &compat->n_actions) ||
1775 __get_user(flow->flags, &compat->flags))
1776 return -EFAULT;
1777
1778 flow->actions = compat_ptr(actions);
1779 return 0;
1780}
1781
1782static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
1783{
1784 struct odp_flow_stats stats;
1785 struct odp_flow_put fp;
1786 int error;
1787
1788 if (compat_get_flow(&fp.flow, &ufp->flow) ||
1789 get_user(fp.flags, &ufp->flags))
1790 return -EFAULT;
1791
1792 error = do_put_flow(dp, &fp, &stats);
1793 if (error)
1794 return error;
1795
1796 if (copy_to_user(&ufp->flow.stats, &stats,
1797 sizeof(struct odp_flow_stats)))
1798 return -EFAULT;
1799
1800 return 0;
1801}
1802
1803static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
6bfafa55 1804 struct timespec time_offset,
3fbd517a
BP
1805 struct compat_odp_flow __user *ufp)
1806{
1807 compat_uptr_t actions;
1808
1809 if (get_user(actions, &ufp->actions))
1810 return -EFAULT;
1811
6bfafa55 1812 return do_answer_query(flow, query_flags, time_offset, &ufp->stats,
3fbd517a
BP
1813 compat_ptr(actions), &ufp->n_actions);
1814}
1815
1816static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
1817{
1818 struct sw_flow *flow;
1819 struct odp_flow uf;
1820 int error;
1821
1822 if (compat_get_flow(&uf, ufp))
1823 return -EFAULT;
1824
1825 flow = do_del_flow(dp, &uf.key);
1826 if (IS_ERR(flow))
1827 return PTR_ERR(flow);
1828
6bfafa55 1829 error = compat_answer_query(flow, 0, get_time_offset(), ufp);
3fbd517a
BP
1830 flow_deferred_free(flow);
1831 return error;
1832}
1833
1834static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
1835{
1836 struct tbl *table = rcu_dereference(dp->table);
6bfafa55 1837 struct timespec time_offset;
3fbd517a
BP
1838 u32 i;
1839
6bfafa55
JG
1840 time_offset = get_time_offset();
1841
3fbd517a
BP
1842 for (i = 0; i < n_flows; i++) {
1843 struct compat_odp_flow __user *ufp = &flows[i];
1844 struct odp_flow uf;
1845 struct tbl_node *flow_node;
1846 int error;
1847
1848 if (compat_get_flow(&uf, ufp))
1849 return -EFAULT;
1850 memset(uf.key.reserved, 0, sizeof uf.key.reserved);
1851
1852 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1853 if (!flow_node)
1854 error = put_user(ENOENT, &ufp->stats.error);
1855 else
6bfafa55 1856 error = compat_answer_query(flow_cast(flow_node), uf.flags, time_offset, ufp);
3fbd517a
BP
1857 if (error)
1858 return -EFAULT;
1859 }
1860 return n_flows;
1861}
1862
1863struct compat_list_flows_cbdata {
1864 struct compat_odp_flow __user *uflows;
1865 u32 n_flows;
1866 u32 listed_flows;
6bfafa55 1867 struct timespec time_offset;
3fbd517a
BP
1868};
1869
1870static int compat_list_flow(struct tbl_node *node, void *cbdata_)
1871{
1872 struct sw_flow *flow = flow_cast(node);
1873 struct compat_list_flows_cbdata *cbdata = cbdata_;
1874 struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1875 int error;
1876
1877 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1878 return -EFAULT;
6bfafa55 1879 error = compat_answer_query(flow, 0, cbdata->time_offset, ufp);
3fbd517a
BP
1880 if (error)
1881 return error;
1882
1883 if (cbdata->listed_flows >= cbdata->n_flows)
1884 return cbdata->listed_flows;
1885 return 0;
1886}
1887
1888static int compat_list_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
1889{
1890 struct compat_list_flows_cbdata cbdata;
1891 int error;
1892
1893 if (!n_flows)
1894 return 0;
1895
1896 cbdata.uflows = flows;
1897 cbdata.n_flows = n_flows;
1898 cbdata.listed_flows = 0;
6bfafa55
JG
1899 cbdata.time_offset = get_time_offset();
1900
3fbd517a
BP
1901 error = tbl_foreach(rcu_dereference(dp->table), compat_list_flow, &cbdata);
1902 return error ? error : cbdata.listed_flows;
1903}
1904
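/* Common wrapper for the 32-bit flow vector ioctls (ODP_FLOW_GET32 and
 * ODP_FLOW_LIST32): validates the user-supplied vector, runs 'function'
 * over it, and writes the processed count back when it differs from the
 * number requested. */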
1905static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1906 int (*function)(struct datapath *,
1907 struct compat_odp_flow *,
1908 u32 n_flows))
1909{
1910 struct compat_odp_flowvec __user *uflowvec;
1911 struct compat_odp_flow __user *flows;
1912 struct compat_odp_flowvec flowvec;
1913 int retval;
1914
1915 uflowvec = compat_ptr(argp);
1916 if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
1917 copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1918 return -EFAULT;
1919
1920 if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
1921 return -EINVAL;
1922
1923 flows = compat_ptr(flowvec.flows);
1924 if (!access_ok(VERIFY_WRITE, flows,
1925 flowvec.n_flows * sizeof(struct compat_odp_flow)))
1926 return -EFAULT;
1927
1928 retval = function(dp, flows, flowvec.n_flows);
1929 return (retval < 0 ? retval
1930 : retval == flowvec.n_flows ? 0
1931 : put_user(retval, &uflowvec->n_flows));
1932}
1933
1934static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
1935{
1936 struct odp_execute execute;
1937 compat_uptr_t actions;
1938 compat_uptr_t data;
1939
1940 if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
3fbd517a
BP
1941 __get_user(actions, &uexecute->actions) ||
1942 __get_user(execute.n_actions, &uexecute->n_actions) ||
1943 __get_user(data, &uexecute->data) ||
1944 __get_user(execute.length, &uexecute->length))
1945 return -EFAULT;
1946
1947 execute.actions = compat_ptr(actions);
1948 execute.data = compat_ptr(data);
1949
1950 return do_execute(dp, &execute);
1951}
1952
1953static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
1954{
1955 int dp_idx = iminor(f->f_dentry->d_inode);
1956 struct datapath *dp;
1957 int err;
1958
1959 switch (cmd) {
1960 case ODP_DP_DESTROY:
1961 case ODP_FLOW_FLUSH:
1962 /* Ioctls that don't need any translation at all. */
1963 return openvswitch_ioctl(f, cmd, argp);
1964
1965 case ODP_DP_CREATE:
1966 case ODP_PORT_ATTACH:
1967 case ODP_PORT_DETACH:
1968 case ODP_VPORT_DEL:
1969 case ODP_VPORT_MTU_SET:
1970 case ODP_VPORT_MTU_GET:
1971 case ODP_VPORT_ETHER_SET:
1972 case ODP_VPORT_ETHER_GET:
780e6207 1973 case ODP_VPORT_STATS_SET:
3fbd517a
BP
1974 case ODP_VPORT_STATS_GET:
1975 case ODP_DP_STATS:
1976 case ODP_GET_DROP_FRAGS:
1977 case ODP_SET_DROP_FRAGS:
1978 case ODP_SET_LISTEN_MASK:
1979 case ODP_GET_LISTEN_MASK:
1980 case ODP_SET_SFLOW_PROBABILITY:
1981 case ODP_GET_SFLOW_PROBABILITY:
1982 case ODP_PORT_QUERY:
1983 /* Ioctls that just need their pointer argument extended. */
1984 return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
1985
1986 case ODP_VPORT_ADD32:
61e89cd6 1987 return compat_vport_user_add(compat_ptr(argp));
3fbd517a
BP
1988
1989 case ODP_VPORT_MOD32:
61e89cd6 1990 return compat_vport_user_mod(compat_ptr(argp));
3fbd517a
BP
1991 }
1992
1993 dp = get_dp_locked(dp_idx);
1994 err = -ENODEV;
1995 if (!dp)
1996 goto exit;
1997
1998 switch (cmd) {
1999 case ODP_PORT_LIST32:
2000 err = compat_list_ports(dp, compat_ptr(argp));
2001 break;
2002
3fbd517a
BP
2003 case ODP_FLOW_PUT32:
2004 err = compat_put_flow(dp, compat_ptr(argp));
2005 break;
2006
2007 case ODP_FLOW_DEL32:
2008 err = compat_del_flow(dp, compat_ptr(argp));
2009 break;
2010
2011 case ODP_FLOW_GET32:
2012 err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
2013 break;
2014
2015 case ODP_FLOW_LIST32:
2016 err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
2017 break;
2018
2019 case ODP_EXECUTE32:
2020 err = compat_execute(dp, compat_ptr(argp));
2021 break;
2022
2023 default:
2024 err = -ENOIOCTLCMD;
2025 break;
2026 }
2027 mutex_unlock(&dp->mutex);
2028exit:
2029 return err;
2030}
2031#endif
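/*
 * For illustration only (not part of the original file): a 32-bit process on
 * a 64-bit kernel reaches the handlers above through the .compat_ioctl hook
 * rather than .unlocked_ioctl.  A hypothetical caller, assuming "fd" is an
 * already-opened datapath device node and "flows" points to an array of n
 * struct odp_flow built against 32-bit headers, might look like:
 *
 *	struct odp_flowvec fv = { .flows = flows, .n_flows = n };
 *	if (ioctl(fd, ODP_FLOW_LIST, &fv) < 0)
 *		perror("ODP_FLOW_LIST");
 *
 * The command value the kernel sees is ODP_FLOW_LIST32, because the ioctl
 * number encodes sizeof(struct odp_flowvec) and the compat layout (with a
 * 32-bit flows pointer) has a different size.
 */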
2032
9cc8b4e4
JG
2033/* Unfortunately this function is not exported, so this is a verbatim copy
2034 * from net/core/datagram.c in 2.6.30. */
2035static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
2036 u8 __user *to, int len,
2037 __wsum *csump)
2038{
2039 int start = skb_headlen(skb);
2040 int pos = 0;
2041 int i, copy = start - offset;
2042
2043 /* Copy header. */
2044 if (copy > 0) {
2045 int err = 0;
2046 if (copy > len)
2047 copy = len;
2048 *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
2049 *csump, &err);
2050 if (err)
2051 goto fault;
2052 if ((len -= copy) == 0)
2053 return 0;
2054 offset += copy;
2055 to += copy;
2056 pos = copy;
2057 }
2058
2059 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2060 int end;
2061
2062 WARN_ON(start > offset + len);
2063
2064 end = start + skb_shinfo(skb)->frags[i].size;
2065 if ((copy = end - offset) > 0) {
2066 __wsum csum2;
2067 int err = 0;
2068 u8 *vaddr;
2069 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2070 struct page *page = frag->page;
2071
2072 if (copy > len)
2073 copy = len;
2074 vaddr = kmap(page);
2075 csum2 = csum_and_copy_to_user(vaddr +
2076 frag->page_offset +
2077 offset - start,
2078 to, copy, 0, &err);
2079 kunmap(page);
2080 if (err)
2081 goto fault;
2082 *csump = csum_block_add(*csump, csum2, pos);
2083 if (!(len -= copy))
2084 return 0;
2085 offset += copy;
2086 to += copy;
2087 pos += copy;
2088 }
2089 start = end;
2090 }
2091
2092 if (skb_shinfo(skb)->frag_list) {
2093 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2094
2095 for (; list; list=list->next) {
2096 int end;
2097
2098 WARN_ON(start > offset + len);
2099
2100 end = start + list->len;
2101 if ((copy = end - offset) > 0) {
2102 __wsum csum2 = 0;
2103 if (copy > len)
2104 copy = len;
2105 if (skb_copy_and_csum_datagram(list,
2106 offset - start,
2107 to, copy,
2108 &csum2))
2109 goto fault;
2110 *csump = csum_block_add(*csump, csum2, pos);
2111 if ((len -= copy) == 0)
2112 return 0;
2113 offset += copy;
2114 to += copy;
2115 pos += copy;
2116 }
2117 start = end;
2118 }
2119 }
2120 if (!len)
2121 return 0;
2122
2123fault:
2124 return -EFAULT;
2125}
2126
064af421
BP
2127ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
2128 loff_t *ppos)
2129{
6fba0d0b 2130 /* XXX is there sufficient synchronization here? */
7c40efc9 2131 int listeners = get_listen_mask(f);
064af421
BP
2132 int dp_idx = iminor(f->f_dentry->d_inode);
2133 struct datapath *dp = get_dp(dp_idx);
2134 struct sk_buff *skb;
9cc8b4e4 2135 size_t copy_bytes, tot_copy_bytes;
064af421
BP
2136 int retval;
2137
2138 if (!dp)
2139 return -ENODEV;
2140
2141 if (nbytes == 0 || !listeners)
2142 return 0;
2143
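	/* Block until one of the queues selected by the listen mask has a
	 * packet, honoring O_NONBLOCK and pending signals. */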
2144 for (;;) {
2145 int i;
2146
2147 for (i = 0; i < DP_N_QUEUES; i++) {
2148 if (listeners & (1 << i)) {
2149 skb = skb_dequeue(&dp->queues[i]);
2150 if (skb)
2151 goto success;
2152 }
2153 }
2154
2155 if (f->f_flags & O_NONBLOCK) {
2156 retval = -EAGAIN;
2157 goto error;
2158 }
2159
2160 wait_event_interruptible(dp->waitqueue,
2161 dp_has_packet_of_interest(dp,
2162 listeners));
2163
2164 if (signal_pending(current)) {
2165 retval = -ERESTARTSYS;
2166 goto error;
2167 }
2168 }
2169success:
9cc8b4e4 2170 copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
d295e8e9 2171
9cc8b4e4
JG
2172 retval = 0;
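	/* If the hardware was expected to finish the checksum (CHECKSUM_PARTIAL),
	 * either fold it directly into the user's copy (when the read covers the
	 * whole packet) or complete it in the skb before the plain copy below. */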
2173 if (skb->ip_summed == CHECKSUM_PARTIAL) {
9fc10ed9
JG
2174 if (copy_bytes == skb->len) {
2175 __wsum csum = 0;
1336993c 2176 unsigned int csum_start, csum_offset;
9cc8b4e4 2177
9cc8b4e4 2178#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
f057cdda 2179 csum_start = skb->csum_start - skb_headroom(skb);
9fc10ed9 2180 csum_offset = skb->csum_offset;
9cc8b4e4 2181#else
f057cdda 2182 csum_start = skb_transport_header(skb) - skb->data;
9fc10ed9 2183 csum_offset = skb->csum;
9cc8b4e4 2184#endif
f057cdda 2185 BUG_ON(csum_start >= skb_headlen(skb));
9cc8b4e4
JG
2186 retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
2187 copy_bytes - csum_start, &csum);
9cc8b4e4
JG
2188 if (!retval) {
2189 __sum16 __user *csump;
2190
2191 copy_bytes = csum_start;
2192 csump = (__sum16 __user *)(buf + csum_start + csum_offset);
f057cdda
JG
2193
2194 BUG_ON((char *)csump + sizeof(__sum16) > buf + nbytes);
9cc8b4e4
JG
2195				retval = put_user(csum_fold(csum), csump);
2196 }
9fc10ed9
JG
2197 } else
2198 retval = skb_checksum_help(skb);
9cc8b4e4
JG
2199 }
2200
2201 if (!retval) {
2202		struct iovec iov;
2203
2204 iov.iov_base = buf;
2205 iov.iov_len = copy_bytes;
2206 retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
2207 }
2208
064af421 2209 if (!retval)
9cc8b4e4
JG
2210 retval = tot_copy_bytes;
2211
064af421
BP
2212 kfree_skb(skb);
2213
2214error:
2215 return retval;
2216}
2217
2218static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
2219{
6fba0d0b 2220 /* XXX is there sufficient synchronization here? */
064af421
BP
2221 int dp_idx = iminor(file->f_dentry->d_inode);
2222 struct datapath *dp = get_dp(dp_idx);
2223 unsigned int mask;
2224
2225 if (dp) {
2226 mask = 0;
2227 poll_wait(file, &dp->waitqueue, wait);
7c40efc9 2228 if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
064af421
BP
2229 mask |= POLLIN | POLLRDNORM;
2230 } else {
2231 mask = POLLIN | POLLRDNORM | POLLHUP;
2232 }
2233 return mask;
2234}
2235
2236struct file_operations openvswitch_fops = {
2237 /* XXX .aio_read = openvswitch_aio_read, */
2238 .read = openvswitch_read,
2239 .poll = openvswitch_poll,
2240 .unlocked_ioctl = openvswitch_ioctl,
3fbd517a
BP
2241#ifdef CONFIG_COMPAT
2242 .compat_ioctl = openvswitch_compat_ioctl,
2243#endif
064af421
BP
2244 /* XXX .fasync = openvswitch_fasync, */
2245};
2246
2247static int major;
22d24ebf 2248
22d24ebf
BP
2249static int __init dp_init(void)
2250{
f2459fe7 2251 struct sk_buff *dummy_skb;
22d24ebf
BP
2252 int err;
2253
f2459fe7 2254 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
22d24ebf 2255
f2459fe7 2256	printk(KERN_INFO "Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
064af421
BP
2257
2258 err = flow_init();
2259 if (err)
2260 goto error;
2261
f2459fe7 2262 err = vport_init();
064af421
BP
2263 if (err)
2264 goto error_flow_exit;
2265
f2459fe7
JG
2266 err = register_netdevice_notifier(&dp_device_notifier);
2267 if (err)
2268 goto error_vport_exit;
2269
064af421
BP
2270	err = major = register_chrdev(0, "openvswitch", &openvswitch_fops);
2271	if (err < 0)
2272		goto error_unreg_notifier;
2273
064af421
BP
2274 return 0;
2275
2276error_unreg_notifier:
2277 unregister_netdevice_notifier(&dp_device_notifier);
f2459fe7
JG
2278error_vport_exit:
2279 vport_exit();
064af421
BP
2280error_flow_exit:
2281 flow_exit();
2282error:
2283 return err;
2284}
2285
2286static void dp_cleanup(void)
2287{
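	/* Wait for outstanding RCU callbacks (such as deferred flow frees) to
	 * finish before the module text and data go away. */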
2288 rcu_barrier();
2289 unregister_chrdev(major, "openvswitch");
2290 unregister_netdevice_notifier(&dp_device_notifier);
f2459fe7 2291 vport_exit();
064af421 2292 flow_exit();
064af421
BP
2293}
2294
2295module_init(dp_init);
2296module_exit(dp_cleanup);
2297
2298MODULE_DESCRIPTION("Open vSwitch switching datapath");
2299MODULE_LICENSE("GPL");