1 /*
2 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
4 *
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
7 */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/fs.h>
16 #include <linux/if_arp.h>
17 #include <linux/if_vlan.h>
18 #include <linux/in.h>
19 #include <linux/ip.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/kernel.h>
24 #include <linux/kthread.h>
25 #include <linux/mutex.h>
26 #include <linux/percpu.h>
27 #include <linux/rcupdate.h>
28 #include <linux/tcp.h>
29 #include <linux/udp.h>
30 #include <linux/version.h>
31 #include <linux/ethtool.h>
32 #include <linux/wait.h>
33 #include <asm/system.h>
34 #include <asm/div64.h>
35 #include <asm/bug.h>
36 #include <linux/highmem.h>
37 #include <linux/netfilter_bridge.h>
38 #include <linux/netfilter_ipv4.h>
39 #include <linux/inetdevice.h>
40 #include <linux/list.h>
41 #include <linux/rculist.h>
42 #include <linux/dmi.h>
43 #include <net/inet_ecn.h>
44 #include <linux/compat.h>
45
46 #include "openvswitch/datapath-protocol.h"
47 #include "checksum.h"
48 #include "datapath.h"
49 #include "actions.h"
50 #include "flow.h"
51 #include "loop_counter.h"
52 #include "odp-compat.h"
53 #include "table.h"
54 #include "vport-internal_dev.h"
55
56 #include "compat.h"
57
58 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
59 EXPORT_SYMBOL(dp_ioctl_hook);
60
61 /* Datapaths. Protected on the read side by rcu_read_lock, on the write side
62 * by dp_mutex.
63 *
64 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
65 * lock first.
66 *
67 * It is safe to access the datapath and vport structures with just
68 * dp_mutex.
69 */
70 static struct datapath __rcu *dps[ODP_MAX];
71 static DEFINE_MUTEX(dp_mutex);
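/*
 * Illustrative sketch of the lock ordering the comment above requires when
 * both locks are needed, as create_dp() does below (attach_port() takes the
 * per-datapath dp->mutex via get_dp_locked() instead of dp_mutex):
 *
 *	rtnl_lock();
 *	mutex_lock(&dp_mutex);
 *	... modify dps[] or dp->ports ...
 *	mutex_unlock(&dp_mutex);
 *	rtnl_unlock();
 */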
72
73 static int new_vport(struct datapath *, struct odp_port *, int port_no);
74
75 /* Must be called with rcu_read_lock or dp_mutex. */
76 struct datapath *get_dp(int dp_idx)
77 {
78 if (dp_idx < 0 || dp_idx >= ODP_MAX)
79 return NULL;
80 return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
81 lockdep_is_held(&dp_mutex));
82 }
83 EXPORT_SYMBOL_GPL(get_dp);
84
85 static struct datapath *get_dp_locked(int dp_idx)
86 {
87 struct datapath *dp;
88
89 mutex_lock(&dp_mutex);
90 dp = get_dp(dp_idx);
91 if (dp)
92 mutex_lock(&dp->mutex);
93 mutex_unlock(&dp_mutex);
94 return dp;
95 }
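/*
 * get_dp_locked() returns with dp->mutex held, so callers drop it when they
 * are done.  A minimal sketch of the pattern openvswitch_ioctl() uses below:
 *
 *	struct datapath *dp = get_dp_locked(dp_idx);
 *	if (!dp)
 *		return -ENODEV;
 *	... operate on dp ...
 *	mutex_unlock(&dp->mutex);
 */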
96
97 static struct tbl *get_table_protected(struct datapath *dp)
98 {
99 return rcu_dereference_protected(dp->table, lockdep_is_held(&dp->mutex));
100 }
101
102 /* Must be called with rcu_read_lock or RTNL lock. */
103 const char *dp_name(const struct datapath *dp)
104 {
105 return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
106 }
107
108 static inline size_t br_nlmsg_size(void)
109 {
110 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
111 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
112 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
113 + nla_total_size(4) /* IFLA_MASTER */
114 + nla_total_size(4) /* IFLA_MTU */
115 + nla_total_size(4) /* IFLA_LINK */
116 + nla_total_size(1); /* IFLA_OPERSTATE */
117 }
118
119 static int dp_fill_ifinfo(struct sk_buff *skb,
120 const struct vport *port,
121 int event, unsigned int flags)
122 {
123 const struct datapath *dp = port->dp;
124 int ifindex = vport_get_ifindex(port);
125 int iflink = vport_get_iflink(port);
126 struct ifinfomsg *hdr;
127 struct nlmsghdr *nlh;
128
129 if (ifindex < 0)
130 return ifindex;
131
132 if (iflink < 0)
133 return iflink;
134
135 nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
136 if (nlh == NULL)
137 return -EMSGSIZE;
138
139 hdr = nlmsg_data(nlh);
140 hdr->ifi_family = AF_BRIDGE;
141 hdr->__ifi_pad = 0;
142 hdr->ifi_type = ARPHRD_ETHER;
143 hdr->ifi_index = ifindex;
144 hdr->ifi_flags = vport_get_flags(port);
145 hdr->ifi_change = 0;
146
147 NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
148 NLA_PUT_U32(skb, IFLA_MASTER,
149 vport_get_ifindex(rtnl_dereference(dp->ports[ODPP_LOCAL])));
150 NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
151 #ifdef IFLA_OPERSTATE
152 NLA_PUT_U8(skb, IFLA_OPERSTATE,
153 vport_is_running(port)
154 ? vport_get_operstate(port)
155 : IF_OPER_DOWN);
156 #endif
157
158 NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
159
160 if (ifindex != iflink)
161 NLA_PUT_U32(skb, IFLA_LINK, iflink);
162
163 return nlmsg_end(skb, nlh);
164
165 nla_put_failure:
166 nlmsg_cancel(skb, nlh);
167 return -EMSGSIZE;
168 }
169
170 static void dp_ifinfo_notify(int event, struct vport *port)
171 {
172 struct sk_buff *skb;
173 int err = -ENOBUFS;
174
175 skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
176 if (skb == NULL)
177 goto errout;
178
179 err = dp_fill_ifinfo(skb, port, event, 0);
180 if (err < 0) {
181 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
182 WARN_ON(err == -EMSGSIZE);
183 kfree_skb(skb);
184 goto errout;
185 }
186 rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
187 return;
188 errout:
189 if (err < 0)
190 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
191 }
192
193 static void release_dp(struct kobject *kobj)
194 {
195 struct datapath *dp = container_of(kobj, struct datapath, ifobj);
196 kfree(dp);
197 }
198
199 static struct kobj_type dp_ktype = {
200 .release = release_dp
201 };
202
203 static int create_dp(int dp_idx, const char __user *devnamep)
204 {
205 struct odp_port internal_dev_port;
206 char devname[IFNAMSIZ];
207 struct datapath *dp;
208 int err;
209 int i;
210
211 if (devnamep) {
212 int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
213 if (retval < 0) {
214 err = -EFAULT;
215 goto err;
216 } else if (retval >= IFNAMSIZ) {
217 err = -ENAMETOOLONG;
218 goto err;
219 }
220 } else {
221 snprintf(devname, sizeof devname, "of%d", dp_idx);
222 }
223
224 rtnl_lock();
225 mutex_lock(&dp_mutex);
226 err = -ENODEV;
227 if (!try_module_get(THIS_MODULE))
228 goto err_unlock;
229
230 /* Exit early if a datapath with that number already exists.
231 * (We don't use -EEXIST because that's ambiguous with 'devname'
232 * conflicting with an existing network device name.) */
233 err = -EBUSY;
234 if (get_dp(dp_idx))
235 goto err_put_module;
236
237 err = -ENOMEM;
238 dp = kzalloc(sizeof *dp, GFP_KERNEL);
239 if (dp == NULL)
240 goto err_put_module;
241 INIT_LIST_HEAD(&dp->port_list);
242 mutex_init(&dp->mutex);
243 dp->dp_idx = dp_idx;
244 for (i = 0; i < DP_N_QUEUES; i++)
245 skb_queue_head_init(&dp->queues[i]);
246 init_waitqueue_head(&dp->waitqueue);
247
248 /* Initialize kobject for bridge. This will be added as
249 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
250 dp->ifobj.kset = NULL;
251 kobject_init(&dp->ifobj, &dp_ktype);
252
253 /* Allocate table. */
254 err = -ENOMEM;
255 rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
256 if (!dp->table)
257 goto err_free_dp;
258
259 /* Set up our datapath device. */
260 BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
261 strcpy(internal_dev_port.devname, devname);
262 strcpy(internal_dev_port.type, "internal");
263 err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
264 if (err) {
265 if (err == -EBUSY)
266 err = -EEXIST;
267
268 goto err_destroy_table;
269 }
270
271 dp->drop_frags = 0;
272 dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
273 if (!dp->stats_percpu) {
274 err = -ENOMEM;
275 goto err_destroy_local_port;
276 }
277
278 rcu_assign_pointer(dps[dp_idx], dp);
279 dp_sysfs_add_dp(dp);
280
281 mutex_unlock(&dp_mutex);
282 rtnl_unlock();
283
284 return 0;
285
286 err_destroy_local_port:
287 dp_detach_port(dp->ports[ODPP_LOCAL]);
288 err_destroy_table:
289 tbl_destroy(dp->table, NULL);
290 err_free_dp:
291 kfree(dp);
292 err_put_module:
293 module_put(THIS_MODULE);
294 err_unlock:
295 mutex_unlock(&dp_mutex);
296 rtnl_unlock();
297 err:
298 return err;
299 }
300
301 static void do_destroy_dp(struct datapath *dp)
302 {
303 struct vport *p, *n;
304 int i;
305
306 list_for_each_entry_safe (p, n, &dp->port_list, node)
307 if (p->port_no != ODPP_LOCAL)
308 dp_detach_port(p);
309
310 dp_sysfs_del_dp(dp);
311
312 rcu_assign_pointer(dps[dp->dp_idx], NULL);
313
314 dp_detach_port(dp->ports[ODPP_LOCAL]);
315
316 tbl_destroy(dp->table, flow_free_tbl);
317
318 for (i = 0; i < DP_N_QUEUES; i++)
319 skb_queue_purge(&dp->queues[i]);
320 free_percpu(dp->stats_percpu);
321 kobject_put(&dp->ifobj);
322 module_put(THIS_MODULE);
323 }
324
325 static int destroy_dp(int dp_idx)
326 {
327 struct datapath *dp;
328 int err;
329
330 rtnl_lock();
331 mutex_lock(&dp_mutex);
332 dp = get_dp(dp_idx);
333 err = -ENODEV;
334 if (!dp)
335 goto err_unlock;
336
337 do_destroy_dp(dp);
338 err = 0;
339
340 err_unlock:
341 mutex_unlock(&dp_mutex);
342 rtnl_unlock();
343 return err;
344 }
345
346 /* Called with RTNL lock and dp_mutex. */
347 static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
348 {
349 struct vport_parms parms;
350 struct vport *vport;
351
352 parms.name = odp_port->devname;
353 parms.type = odp_port->type;
354 parms.config = odp_port->config;
355 parms.dp = dp;
356 parms.port_no = port_no;
357
358 vport_lock();
359 vport = vport_add(&parms);
360 vport_unlock();
361
362 if (IS_ERR(vport))
363 return PTR_ERR(vport);
364
365 rcu_assign_pointer(dp->ports[port_no], vport);
366 list_add_rcu(&vport->node, &dp->port_list);
367 dp->n_ports++;
368
369 dp_ifinfo_notify(RTM_NEWLINK, vport);
370
371 return 0;
372 }
373
374 static int attach_port(int dp_idx, struct odp_port __user *portp)
375 {
376 struct datapath *dp;
377 struct odp_port port;
378 int port_no;
379 int err;
380
381 err = -EFAULT;
382 if (copy_from_user(&port, portp, sizeof port))
383 goto out;
384 port.devname[IFNAMSIZ - 1] = '\0';
385 port.type[VPORT_TYPE_SIZE - 1] = '\0';
386
387 rtnl_lock();
388 dp = get_dp_locked(dp_idx);
389 err = -ENODEV;
390 if (!dp)
391 goto out_unlock_rtnl;
392
393 for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
394 if (!dp->ports[port_no])
395 goto got_port_no;
396 err = -EFBIG;
397 goto out_unlock_dp;
398
399 got_port_no:
400 err = new_vport(dp, &port, port_no);
401 if (err)
402 goto out_unlock_dp;
403
404 set_internal_devs_mtu(dp);
405 dp_sysfs_add_if(dp->ports[port_no]);
406
407 err = put_user(port_no, &portp->port);
408
409 out_unlock_dp:
410 mutex_unlock(&dp->mutex);
411 out_unlock_rtnl:
412 rtnl_unlock();
413 out:
414 return err;
415 }
416
417 int dp_detach_port(struct vport *p)
418 {
419 int err;
420
421 ASSERT_RTNL();
422
423 if (p->port_no != ODPP_LOCAL)
424 dp_sysfs_del_if(p);
425 dp_ifinfo_notify(RTM_DELLINK, p);
426
427 /* First drop references to device. */
428 p->dp->n_ports--;
429 list_del_rcu(&p->node);
430 rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
431
432 /* Then destroy it. */
433 vport_lock();
434 err = vport_del(p);
435 vport_unlock();
436
437 return err;
438 }
439
440 static int detach_port(int dp_idx, int port_no)
441 {
442 struct vport *p;
443 struct datapath *dp;
444 int err;
445
446 err = -EINVAL;
447 if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
448 goto out;
449
450 rtnl_lock();
451 dp = get_dp_locked(dp_idx);
452 err = -ENODEV;
453 if (!dp)
454 goto out_unlock_rtnl;
455
456 p = dp->ports[port_no];
457 err = -ENOENT;
458 if (!p)
459 goto out_unlock_dp;
460
461 err = dp_detach_port(p);
462
463 out_unlock_dp:
464 mutex_unlock(&dp->mutex);
465 out_unlock_rtnl:
466 rtnl_unlock();
467 out:
468 return err;
469 }
470
471 /* Must be called with rcu_read_lock. */
472 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
473 {
474 struct datapath *dp = p->dp;
475 struct dp_stats_percpu *stats;
476 int stats_counter_off;
477 struct sw_flow_actions *acts;
478 struct loop_counter *loop;
479 int error;
480
481 OVS_CB(skb)->vport = p;
482
483 if (!OVS_CB(skb)->flow) {
484 struct odp_flow_key key;
485 struct tbl_node *flow_node;
486 bool is_frag;
487
488 /* Extract flow from 'skb' into 'key'. */
489 error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
490 if (unlikely(error)) {
491 kfree_skb(skb);
492 return;
493 }
494
495 if (is_frag && dp->drop_frags) {
496 kfree_skb(skb);
497 stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
498 goto out;
499 }
500
501 /* Look up flow. */
502 flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
503 flow_hash(&key), flow_cmp);
504 if (unlikely(!flow_node)) {
505 dp_output_control(dp, skb, _ODPL_MISS_NR,
506 (__force u64)OVS_CB(skb)->tun_id);
507 stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
508 goto out;
509 }
510
511 OVS_CB(skb)->flow = flow_cast(flow_node);
512 }
513
514 stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
515 flow_used(OVS_CB(skb)->flow, skb);
516
517 acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
518
519 /* Check whether we've looped too much. */
520 loop = loop_get_counter();
521 if (unlikely(++loop->count > MAX_LOOPS))
522 loop->looping = true;
523 if (unlikely(loop->looping)) {
524 loop_suppress(dp, acts);
525 kfree_skb(skb);
526 goto out_loop;
527 }
528
529 /* Execute actions. */
530 execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
531 acts->actions_len);
532
533 /* Check whether sub-actions looped too much. */
534 if (unlikely(loop->looping))
535 loop_suppress(dp, acts);
536
537 out_loop:
538 /* Decrement loop counter. */
539 if (!--loop->count)
540 loop->looping = false;
541 loop_put_counter();
542
543 out:
544 /* Update datapath statistics. */
545 local_bh_disable();
546 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
547
548 write_seqcount_begin(&stats->seqlock);
549 (*(u64 *)((u8 *)stats + stats_counter_off))++; /* bumps n_hit, n_missed or n_frags */
550 write_seqcount_end(&stats->seqlock);
551
552 local_bh_enable();
553 }
554
555 /* Append each packet in 'skb' list to 'queue'. There will be only one packet
556 * unless we broke up a GSO packet. */
557 static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
558 int queue_no, u64 arg)
559 {
560 struct sk_buff *nskb;
561 int port_no;
562 int err;
563
564 if (OVS_CB(skb)->vport)
565 port_no = OVS_CB(skb)->vport->port_no;
566 else
567 port_no = ODPP_LOCAL;
568
569 do {
570 struct odp_msg *header;
571
572 nskb = skb->next;
573 skb->next = NULL;
574
575 err = skb_cow(skb, sizeof *header);
576 if (err)
577 goto err_kfree_skbs;
578
579 header = (struct odp_msg*)__skb_push(skb, sizeof *header);
580 header->type = queue_no;
581 header->length = skb->len;
582 header->port = port_no;
583 header->arg = arg;
584 skb_queue_tail(queue, skb);
585
586 skb = nskb;
587 } while (skb);
588 return 0;
589
590 err_kfree_skbs:
591 kfree_skb(skb);
592 while ((skb = nskb) != NULL) {
593 nskb = skb->next;
594 kfree_skb(skb);
595 }
596 return err;
597 }
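/*
 * Each queued skb therefore begins with a struct odp_msg header followed by
 * the packet data, and openvswitch_read() hands one such message to userspace
 * per read().  A userspace parsing sketch (dp_fd and handle_upcall() are
 * hypothetical names, error handling omitted):
 *
 *	char buf[65536];
 *	ssize_t n = read(dp_fd, buf, sizeof buf);	// one message per read()
 *	struct odp_msg *msg = (struct odp_msg *)buf;
 *	// msg->type is _ODPL_MISS_NR, _ODPL_ACTION_NR or _ODPL_SFLOW_NR;
 *	// msg->length counts the header plus the packet data behind it.
 *	handle_upcall(msg->port, msg->arg, msg + 1, msg->length - sizeof *msg);
 */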
598
599 int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
600 u64 arg)
601 {
602 struct dp_stats_percpu *stats;
603 struct sk_buff_head *queue;
604 int err;
605
606 WARN_ON_ONCE(skb_shared(skb));
607 BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
608 queue = &dp->queues[queue_no];
609 err = -ENOBUFS;
610 if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
611 goto err_kfree_skb;
612
613 forward_ip_summed(skb);
614
615 err = vswitch_skb_checksum_setup(skb);
616 if (err)
617 goto err_kfree_skb;
618
619 /* Break apart GSO packets into their component pieces. Otherwise
620 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
621 if (skb_is_gso(skb)) {
622 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
623
624 kfree_skb(skb);
625 skb = nskb;
626 if (IS_ERR(skb)) {
627 err = PTR_ERR(skb);
628 goto err;
629 }
630 }
631
632 err = queue_control_packets(skb, queue, queue_no, arg);
633 wake_up_interruptible(&dp->waitqueue);
634 return err;
635
636 err_kfree_skb:
637 kfree_skb(skb);
638 err:
639 local_bh_disable();
640 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
641
642 write_seqcount_begin(&stats->seqlock);
643 stats->n_lost++;
644 write_seqcount_end(&stats->seqlock);
645
646 local_bh_enable();
647
648 return err;
649 }
650
651 static int flush_flows(struct datapath *dp)
652 {
653 struct tbl *old_table = get_table_protected(dp);
654 struct tbl *new_table;
655
656 new_table = tbl_create(TBL_MIN_BUCKETS);
657 if (!new_table)
658 return -ENOMEM;
659
660 rcu_assign_pointer(dp->table, new_table);
661
662 tbl_deferred_destroy(old_table, flow_free_tbl);
663
664 return 0;
665 }
666
667 static int validate_actions(const struct nlattr *actions, u32 actions_len)
668 {
669 const struct nlattr *a;
670 int rem;
671
672 nla_for_each_attr(a, actions, actions_len, rem) {
673 static const u32 action_lens[ODPAT_MAX + 1] = {
674 [ODPAT_OUTPUT] = 4,
675 [ODPAT_CONTROLLER] = 8,
676 [ODPAT_SET_DL_TCI] = 2,
677 [ODPAT_STRIP_VLAN] = 0,
678 [ODPAT_SET_DL_SRC] = ETH_ALEN,
679 [ODPAT_SET_DL_DST] = ETH_ALEN,
680 [ODPAT_SET_NW_SRC] = 4,
681 [ODPAT_SET_NW_DST] = 4,
682 [ODPAT_SET_NW_TOS] = 1,
683 [ODPAT_SET_TP_SRC] = 2,
684 [ODPAT_SET_TP_DST] = 2,
685 [ODPAT_SET_TUNNEL] = 8,
686 [ODPAT_SET_PRIORITY] = 4,
687 [ODPAT_POP_PRIORITY] = 0,
688 [ODPAT_DROP_SPOOFED_ARP] = 0,
689 };
690 int type = nla_type(a);
691
692 if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
693 return -EINVAL;
694
695 switch (type) {
696 case ODPAT_UNSPEC:
697 return -EINVAL;
698
699 case ODPAT_CONTROLLER:
700 case ODPAT_STRIP_VLAN:
701 case ODPAT_SET_DL_SRC:
702 case ODPAT_SET_DL_DST:
703 case ODPAT_SET_NW_SRC:
704 case ODPAT_SET_NW_DST:
705 case ODPAT_SET_TP_SRC:
706 case ODPAT_SET_TP_DST:
707 case ODPAT_SET_TUNNEL:
708 case ODPAT_SET_PRIORITY:
709 case ODPAT_POP_PRIORITY:
710 case ODPAT_DROP_SPOOFED_ARP:
711 /* No validation needed. */
712 break;
713
714 case ODPAT_OUTPUT:
715 if (nla_get_u32(a) >= DP_MAX_PORTS)
716 return -EINVAL;
717 break;
718 case ODPAT_SET_DL_TCI:
719 if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
720 return -EINVAL;
721 break;
722
723 case ODPAT_SET_NW_TOS:
724 if (nla_get_u8(a) & INET_ECN_MASK)
725 return -EINVAL;
726 break;
727
728 default:
729 return -EOPNOTSUPP;
730 }
731 }
732
733 if (rem > 0)
734 return -EINVAL;
735
736 return 0;
737 }
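/*
 * Sketch of an actions buffer that satisfies validate_actions(): a single
 * ODPAT_OUTPUT attribute whose 4-byte payload names the destination port.
 * Userspace would build something like this and hand it in through
 * ODP_FLOW_PUT or ODP_EXECUTE (illustration only):
 *
 *	struct {
 *		struct nlattr attr;
 *		uint32_t port;
 *	} act = {
 *		.attr = { .nla_len = NLA_HDRLEN + 4, .nla_type = ODPAT_OUTPUT },
 *		.port = 1,		// must be less than DP_MAX_PORTS
 *	};
 *	// actions = &act.attr, actions_len = sizeof act
 */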
738
739 static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
740 {
741 struct sw_flow_actions *actions;
742 int error;
743
744 actions = flow_actions_alloc(flow->actions_len);
745 error = PTR_ERR(actions);
746 if (IS_ERR(actions))
747 goto error;
748
749 error = -EFAULT;
750 if (copy_from_user(actions->actions,
751 (struct nlattr __user __force *)flow->actions,
752 flow->actions_len))
753 goto error_free_actions;
754 error = validate_actions(actions->actions, actions->actions_len);
755 if (error)
756 goto error_free_actions;
757
758 return actions;
759
760 error_free_actions:
761 kfree(actions);
762 error:
763 return ERR_PTR(error);
764 }
765
766 static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
767 {
768 if (flow->used) {
769 struct timespec offset_ts, used, now_mono;
770
771 ktime_get_ts(&now_mono);
772 jiffies_to_timespec(jiffies - flow->used, &offset_ts);
773 set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
774 now_mono.tv_nsec - offset_ts.tv_nsec);
775
776 stats->used_sec = used.tv_sec;
777 stats->used_nsec = used.tv_nsec;
778 } else {
779 stats->used_sec = 0;
780 stats->used_nsec = 0;
781 }
782
783 stats->n_packets = flow->packet_count;
784 stats->n_bytes = flow->byte_count;
785 stats->reserved = 0;
786 stats->tcp_flags = flow->tcp_flags;
787 stats->error = 0;
788 }
789
790 static void clear_stats(struct sw_flow *flow)
791 {
792 flow->used = 0;
793 flow->tcp_flags = 0;
794 flow->packet_count = 0;
795 flow->byte_count = 0;
796 }
797
798 static int expand_table(struct datapath *dp)
799 {
800 struct tbl *old_table = get_table_protected(dp);
801 struct tbl *new_table;
802
803 new_table = tbl_expand(old_table);
804 if (IS_ERR(new_table))
805 return PTR_ERR(new_table);
806
807 rcu_assign_pointer(dp->table, new_table);
808 tbl_deferred_destroy(old_table, NULL);
809
810 return 0;
811 }
812
813 static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
814 struct odp_flow_stats *stats)
815 {
816 struct tbl_node *flow_node;
817 struct sw_flow *flow;
818 struct tbl *table;
819 int error;
820 u32 hash;
821
822 hash = flow_hash(&uf->flow.key);
823 table = get_table_protected(dp);
824 flow_node = tbl_lookup(table, &uf->flow.key, hash, flow_cmp);
825 if (!flow_node) {
826 /* No such flow. */
827 struct sw_flow_actions *acts;
828
829 error = -ENOENT;
830 if (!(uf->flags & ODPPF_CREATE))
831 goto error;
832
833 /* Expand table, if necessary, to make room. */
834 if (tbl_count(table) >= tbl_n_buckets(table)) {
835 error = expand_table(dp);
836 if (error)
837 goto error;
838 table = get_table_protected(dp);
839 }
840
841 /* Allocate flow. */
842 flow = flow_alloc();
843 if (IS_ERR(flow)) {
844 error = PTR_ERR(flow);
845 goto error;
846 }
847 flow->key = uf->flow.key;
848 clear_stats(flow);
849
850 /* Obtain actions. */
851 acts = get_actions(&uf->flow);
852 error = PTR_ERR(acts);
853 if (IS_ERR(acts))
854 goto error_free_flow;
855 rcu_assign_pointer(flow->sf_acts, acts);
856
857 /* Put flow in bucket. */
858 error = tbl_insert(table, &flow->tbl_node, hash);
859 if (error)
860 goto error_free_flow_acts;
861
862 memset(stats, 0, sizeof(struct odp_flow_stats));
863 } else {
864 /* We found a matching flow. */
865 struct sw_flow_actions *old_acts, *new_acts;
866
867 flow = flow_cast(flow_node);
868
869 /* Bail out if we're not allowed to modify an existing flow. */
870 error = -EEXIST;
871 if (!(uf->flags & ODPPF_MODIFY))
872 goto error;
873
874 /* Swap actions. */
875 new_acts = get_actions(&uf->flow);
876 error = PTR_ERR(new_acts);
877 if (IS_ERR(new_acts))
878 goto error;
879
880 old_acts = rcu_dereference_protected(flow->sf_acts,
881 lockdep_is_held(&dp->mutex));
882 if (old_acts->actions_len != new_acts->actions_len ||
883 memcmp(old_acts->actions, new_acts->actions,
884 old_acts->actions_len)) {
885 rcu_assign_pointer(flow->sf_acts, new_acts);
886 flow_deferred_free_acts(old_acts);
887 } else {
888 kfree(new_acts);
889 }
890
891 /* Fetch stats, then clear them if necessary. */
892 spin_lock_bh(&flow->lock);
893 get_stats(flow, stats);
894 if (uf->flags & ODPPF_ZERO_STATS)
895 clear_stats(flow);
896 spin_unlock_bh(&flow->lock);
897 }
898
899 return 0;
900
901 error_free_flow_acts:
902 kfree(flow->sf_acts);
903 error_free_flow:
904 flow->sf_acts = NULL;
905 flow_put(flow);
906 error:
907 return error;
908 }
909
910 static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
911 {
912 struct odp_flow_stats stats;
913 struct odp_flow_put uf;
914 int error;
915
916 if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
917 return -EFAULT;
918
919 error = do_put_flow(dp, &uf, &stats);
920 if (error)
921 return error;
922
923 if (copy_to_user(&ufp->flow.stats, &stats,
924 sizeof(struct odp_flow_stats)))
925 return -EFAULT;
926
927 return 0;
928 }
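/*
 * Illustrative ODP_FLOW_PUT call from the userspace side (dp_fd, key, acts
 * and acts_len are assumed to have been set up elsewhere; error handling
 * omitted):
 *
 *	struct odp_flow_put put;
 *	memset(&put, 0, sizeof put);
 *	put.flow.key = key;			// struct odp_flow_key
 *	put.flow.actions = acts;		// struct nlattr buffer
 *	put.flow.actions_len = acts_len;
 *	put.flags = ODPPF_CREATE | ODPPF_MODIFY;
 *	ioctl(dp_fd, ODP_FLOW_PUT, &put);	// stats are written back on success
 */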
929
930 static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
931 u32 query_flags,
932 struct odp_flow_stats __user *ustats,
933 struct nlattr __user *actions,
934 u32 __user *actions_lenp)
935 {
936 struct sw_flow_actions *sf_acts;
937 struct odp_flow_stats stats;
938 u32 actions_len;
939
940 spin_lock_bh(&flow->lock);
941 get_stats(flow, &stats);
942 if (query_flags & ODPFF_ZERO_TCP_FLAGS)
943 flow->tcp_flags = 0;
944
945 spin_unlock_bh(&flow->lock);
946
947 if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
948 get_user(actions_len, actions_lenp))
949 return -EFAULT;
950
951 if (!actions_len)
952 return 0;
953
954 sf_acts = rcu_dereference_protected(flow->sf_acts,
955 lockdep_is_held(&dp->mutex));
956 if (put_user(sf_acts->actions_len, actions_lenp) ||
957 (actions && copy_to_user(actions, sf_acts->actions,
958 min(sf_acts->actions_len, actions_len))))
959 return -EFAULT;
960
961 return 0;
962 }
963
964 static int answer_query(struct datapath *dp, struct sw_flow *flow,
965 u32 query_flags, struct odp_flow __user *ufp)
966 {
967 struct nlattr __user *actions;
968
969 if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
970 return -EFAULT;
971
972 return do_answer_query(dp, flow, query_flags,
973 &ufp->stats, actions, &ufp->actions_len);
974 }
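/*
 * From the userspace side, the query protocol is: supply an actions buffer
 * and its size in actions_len; the kernel writes back the flow's stats and
 * real actions_len and copies at most the supplied amount.  A sketch for
 * ODP_FLOW_GET (dp_fd and key are assumptions; error handling omitted):
 *
 *	struct nlattr acts[128];
 *	struct odp_flow flow;
 *	memset(&flow, 0, sizeof flow);
 *	flow.key = key;				// the flow to look up
 *	flow.actions = acts;
 *	flow.actions_len = sizeof acts;
 *	struct odp_flowvec fv = { .flows = &flow, .n_flows = 1 };
 *	ioctl(dp_fd, ODP_FLOW_GET, &fv);	// flow.stats and acts now filled in
 */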
975
976 static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
977 {
978 struct tbl *table = get_table_protected(dp);
979 struct tbl_node *flow_node;
980 int error;
981
982 flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
983 if (!flow_node)
984 return ERR_PTR(-ENOENT);
985
986 error = tbl_remove(table, flow_node);
987 if (error)
988 return ERR_PTR(error);
989
990 /* XXX The returned flow_node's statistics might lose a few packets, since
991 * other CPUs can still be using this flow. We used to synchronize_rcu() to
992 * make sure that we got completely accurate stats, but that hurt
993 * performance badly. */
994 return flow_cast(flow_node);
995 }
996
997 static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
998 {
999 struct sw_flow *flow;
1000 struct odp_flow uf;
1001 int error;
1002
1003 if (copy_from_user(&uf, ufp, sizeof uf))
1004 return -EFAULT;
1005
1006 flow = do_del_flow(dp, &uf.key);
1007 if (IS_ERR(flow))
1008 return PTR_ERR(flow);
1009
1010 error = answer_query(dp, flow, 0, ufp);
1011 flow_deferred_free(flow);
1012 return error;
1013 }
1014
1015 static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
1016 {
1017 struct tbl *table = get_table_protected(dp);
1018 u32 i;
1019
1020 for (i = 0; i < flowvec->n_flows; i++) {
1021 struct odp_flow __user *ufp = (struct odp_flow __user __force *)&flowvec->flows[i];
1022 struct odp_flow uf;
1023 struct tbl_node *flow_node;
1024 int error;
1025
1026 if (copy_from_user(&uf, ufp, sizeof uf))
1027 return -EFAULT;
1028
1029 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1030 if (!flow_node)
1031 error = put_user(ENOENT, &ufp->stats.error);
1032 else
1033 error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
1034 if (error)
1035 return -EFAULT;
1036 }
1037 return flowvec->n_flows;
1038 }
1039
1040 struct list_flows_cbdata {
1041 struct datapath *dp;
1042 struct odp_flow __user *uflows;
1043 u32 n_flows;
1044 u32 listed_flows;
1045 };
1046
1047 static int list_flow(struct tbl_node *node, void *cbdata_)
1048 {
1049 struct sw_flow *flow = flow_cast(node);
1050 struct list_flows_cbdata *cbdata = cbdata_;
1051 struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1052 int error;
1053
1054 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1055 return -EFAULT;
1056 error = answer_query(cbdata->dp, flow, 0, ufp);
1057 if (error)
1058 return error;
1059
1060 if (cbdata->listed_flows >= cbdata->n_flows)
1061 return cbdata->listed_flows;
1062 return 0;
1063 }
1064
1065 static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
1066 {
1067 struct list_flows_cbdata cbdata;
1068 int error;
1069
1070 if (!flowvec->n_flows)
1071 return 0;
1072
1073 cbdata.dp = dp;
1074 cbdata.uflows = (struct odp_flow __user __force*)flowvec->flows;
1075 cbdata.n_flows = flowvec->n_flows;
1076 cbdata.listed_flows = 0;
1077
1078 error = tbl_foreach(get_table_protected(dp), list_flow, &cbdata);
1079 return error ? error : cbdata.listed_flows;
1080 }
1081
1082 static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1083 int (*function)(struct datapath *,
1084 const struct odp_flowvec *))
1085 {
1086 struct odp_flowvec __user *uflowvec;
1087 struct odp_flowvec flowvec;
1088 int retval;
1089
1090 uflowvec = (struct odp_flowvec __user *)argp;
1091 if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1092 return -EFAULT;
1093
1094 if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
1095 return -EINVAL;
1096
1097 retval = function(dp, &flowvec);
1098 return (retval < 0 ? retval
1099 : retval == flowvec.n_flows ? 0
1100 : put_user(retval, &uflowvec->n_flows));
1101 }
1102
1103 static int do_execute(struct datapath *dp, const struct odp_execute *execute)
1104 {
1105 struct odp_flow_key key;
1106 struct sk_buff *skb;
1107 struct sw_flow_actions *actions;
1108 struct ethhdr *eth;
1109 bool is_frag;
1110 int err;
1111
1112 err = -EINVAL;
1113 if (execute->length < ETH_HLEN || execute->length > 65535)
1114 goto error;
1115
1116 actions = flow_actions_alloc(execute->actions_len);
1117 if (IS_ERR(actions)) {
1118 err = PTR_ERR(actions);
1119 goto error;
1120 }
1121
1122 err = -EFAULT;
1123 if (copy_from_user(actions->actions,
1124 (struct nlattr __user __force *)execute->actions, execute->actions_len))
1125 goto error_free_actions;
1126
1127 err = validate_actions(actions->actions, execute->actions_len);
1128 if (err)
1129 goto error_free_actions;
1130
1131 err = -ENOMEM;
1132 skb = alloc_skb(execute->length, GFP_KERNEL);
1133 if (!skb)
1134 goto error_free_actions;
1135
1136 err = -EFAULT;
1137 if (copy_from_user(skb_put(skb, execute->length),
1138 (const void __user __force *)execute->data,
1139 execute->length))
1140 goto error_free_skb;
1141
1142 skb_reset_mac_header(skb);
1143 eth = eth_hdr(skb);
1144
1145 /* Normally, setting the skb 'protocol' field would be handled by a
1146 * call to eth_type_trans(), but it assumes there's a sending
1147 * device, which we may not have. */
1148 if (ntohs(eth->h_proto) >= 1536)
1149 skb->protocol = eth->h_proto;
1150 else
1151 skb->protocol = htons(ETH_P_802_2);
1152
1153 err = flow_extract(skb, -1, &key, &is_frag);
1154 if (err)
1155 goto error_free_skb;
1156
1157 rcu_read_lock();
1158 err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
1159 rcu_read_unlock();
1160
1161 kfree(actions);
1162 return err;
1163
1164 error_free_skb:
1165 kfree_skb(skb);
1166 error_free_actions:
1167 kfree(actions);
1168 error:
1169 return err;
1170 }
1171
1172 static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
1173 {
1174 struct odp_execute execute;
1175
1176 if (copy_from_user(&execute, executep, sizeof execute))
1177 return -EFAULT;
1178
1179 return do_execute(dp, &execute);
1180 }
1181
1182 static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
1183 {
1184 struct tbl *table = get_table_protected(dp);
1185 struct odp_stats stats;
1186 int i;
1187
1188 stats.n_flows = tbl_count(table);
1189 stats.cur_capacity = tbl_n_buckets(table);
1190 stats.max_capacity = TBL_MAX_BUCKETS;
1191 stats.n_ports = dp->n_ports;
1192 stats.max_ports = DP_MAX_PORTS;
1193 stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
1194 for_each_possible_cpu(i) {
1195 const struct dp_stats_percpu *percpu_stats;
1196 struct dp_stats_percpu local_stats;
1197 unsigned seqcount;
1198
1199 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
1200
1201 do {
1202 seqcount = read_seqcount_begin(&percpu_stats->seqlock);
1203 local_stats = *percpu_stats;
1204 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
1205
1206 stats.n_frags += local_stats.n_frags;
1207 stats.n_hit += local_stats.n_hit;
1208 stats.n_missed += local_stats.n_missed;
1209 stats.n_lost += local_stats.n_lost;
1210 }
1211 stats.max_miss_queue = DP_MAX_QUEUE_LEN;
1212 stats.max_action_queue = DP_MAX_QUEUE_LEN;
1213 return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
1214 }
1215
1216 /* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
1217 int dp_min_mtu(const struct datapath *dp)
1218 {
1219 struct vport *p;
1220 int mtu = 0;
1221
1222 ASSERT_RTNL();
1223
1224 list_for_each_entry_rcu (p, &dp->port_list, node) {
1225 int dev_mtu;
1226
1227 /* Skip any internal ports, since that's what we're trying to
1228 * set. */
1229 if (is_internal_vport(p))
1230 continue;
1231
1232 dev_mtu = vport_get_mtu(p);
1233 if (!mtu || dev_mtu < mtu)
1234 mtu = dev_mtu;
1235 }
1236
1237 return mtu ? mtu : ETH_DATA_LEN;
1238 }
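/*
 * For example, a datapath whose non-internal ports have MTUs of 1500 and
 * 9000 reports 1500 here, and set_internal_devs_mtu() below pushes that
 * value to every internal port; with no non-internal ports at all the
 * result falls back to ETH_DATA_LEN.
 */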
1239
1240 /* Sets the MTU of all datapath devices to the minimum of the ports. Must
1241 * be called with RTNL lock. */
1242 void set_internal_devs_mtu(const struct datapath *dp)
1243 {
1244 struct vport *p;
1245 int mtu;
1246
1247 ASSERT_RTNL();
1248
1249 mtu = dp_min_mtu(dp);
1250
1251 list_for_each_entry_rcu (p, &dp->port_list, node) {
1252 if (is_internal_vport(p))
1253 vport_set_mtu(p, mtu);
1254 }
1255 }
1256
1257 static int put_port(const struct vport *p, struct odp_port __user *uop)
1258 {
1259 struct odp_port op;
1260
1261 memset(&op, 0, sizeof op);
1262
1263 rcu_read_lock();
1264 strncpy(op.devname, vport_get_name(p), sizeof op.devname);
1265 strncpy(op.type, vport_get_type(p), sizeof op.type);
1266 vport_get_config(p, op.config);
1267 rcu_read_unlock();
1268
1269 op.port = p->port_no;
1270
1271 return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
1272 }
1273
1274 static int query_port(struct datapath *dp, struct odp_port __user *uport)
1275 {
1276 struct odp_port port;
1277
1278 if (copy_from_user(&port, uport, sizeof port))
1279 return -EFAULT;
1280
1281 if (port.devname[0]) {
1282 struct vport *vport;
1283 int err = 0;
1284
1285 port.devname[IFNAMSIZ - 1] = '\0';
1286
1287 vport_lock();
1288 rcu_read_lock();
1289
1290 vport = vport_locate(port.devname);
1291 if (!vport) {
1292 err = -ENODEV;
1293 goto error_unlock;
1294 }
1295 if (vport->dp != dp) {
1296 err = -ENOENT;
1297 goto error_unlock;
1298 }
1299
1300 port.port = vport->port_no;
1301
1302 error_unlock:
1303 rcu_read_unlock();
1304 vport_unlock();
1305
1306 if (err)
1307 return err;
1308 } else {
1309 if (port.port >= DP_MAX_PORTS)
1310 return -EINVAL;
1311 if (!dp->ports[port.port])
1312 return -ENOENT;
1313 }
1314
1315 return put_port(dp->ports[port.port], uport);
1316 }
1317
1318 static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
1319 int n_ports)
1320 {
1321 int idx = 0;
1322 if (n_ports) {
1323 struct vport *p;
1324
1325 list_for_each_entry_rcu (p, &dp->port_list, node) {
1326 if (idx >= n_ports)
1327 break;
1328 if (put_port(p, &uports[idx++]))
1329 return -EFAULT;
1330 }
1331 }
1332 return idx;
1333 }
1334
1335 static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
1336 {
1337 struct odp_portvec pv;
1338 int retval;
1339
1340 if (copy_from_user(&pv, upv, sizeof pv))
1341 return -EFAULT;
1342
1343 retval = do_list_ports(dp, (struct odp_port __user __force *)pv.ports,
1344 pv.n_ports);
1345 if (retval < 0)
1346 return retval;
1347
1348 return put_user(retval, &upv->n_ports);
1349 }
1350
1351 static int get_listen_mask(const struct file *f)
1352 {
1353 return (long)f->private_data;
1354 }
1355
1356 static void set_listen_mask(struct file *f, int listen_mask)
1357 {
1358 f->private_data = (void*)(long)listen_mask;
1359 }
1360
1361 static long openvswitch_ioctl(struct file *f, unsigned int cmd,
1362 unsigned long argp)
1363 {
1364 int dp_idx = iminor(f->f_dentry->d_inode);
1365 struct datapath *dp;
1366 int drop_frags, listeners, port_no;
1367 unsigned int sflow_probability;
1368 int err;
1369
1370 /* Handle commands with special locking requirements up front. */
1371 switch (cmd) {
1372 case ODP_DP_CREATE:
1373 err = create_dp(dp_idx, (char __user *)argp);
1374 goto exit;
1375
1376 case ODP_DP_DESTROY:
1377 err = destroy_dp(dp_idx);
1378 goto exit;
1379
1380 case ODP_VPORT_ATTACH:
1381 err = attach_port(dp_idx, (struct odp_port __user *)argp);
1382 goto exit;
1383
1384 case ODP_VPORT_DETACH:
1385 err = get_user(port_no, (int __user *)argp);
1386 if (!err)
1387 err = detach_port(dp_idx, port_no);
1388 goto exit;
1389
1390 case ODP_VPORT_MOD:
1391 err = vport_user_mod((struct odp_port __user *)argp);
1392 goto exit;
1393
1394 case ODP_VPORT_STATS_GET:
1395 err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
1396 goto exit;
1397
1398 case ODP_VPORT_STATS_SET:
1399 err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
1400 goto exit;
1401
1402 case ODP_VPORT_ETHER_GET:
1403 err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
1404 goto exit;
1405
1406 case ODP_VPORT_ETHER_SET:
1407 err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
1408 goto exit;
1409
1410 case ODP_VPORT_MTU_GET:
1411 err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
1412 goto exit;
1413
1414 case ODP_VPORT_MTU_SET:
1415 err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
1416 goto exit;
1417 }
1418
1419 dp = get_dp_locked(dp_idx);
1420 err = -ENODEV;
1421 if (!dp)
1422 goto exit;
1423
1424 switch (cmd) {
1425 case ODP_DP_STATS:
1426 err = get_dp_stats(dp, (struct odp_stats __user *)argp);
1427 break;
1428
1429 case ODP_GET_DROP_FRAGS:
1430 err = put_user(dp->drop_frags, (int __user *)argp);
1431 break;
1432
1433 case ODP_SET_DROP_FRAGS:
1434 err = get_user(drop_frags, (int __user *)argp);
1435 if (err)
1436 break;
1437 err = -EINVAL;
1438 if (drop_frags != 0 && drop_frags != 1)
1439 break;
1440 dp->drop_frags = drop_frags;
1441 err = 0;
1442 break;
1443
1444 case ODP_GET_LISTEN_MASK:
1445 err = put_user(get_listen_mask(f), (int __user *)argp);
1446 break;
1447
1448 case ODP_SET_LISTEN_MASK:
1449 err = get_user(listeners, (int __user *)argp);
1450 if (err)
1451 break;
1452 err = -EINVAL;
1453 if (listeners & ~ODPL_ALL)
1454 break;
1455 err = 0;
1456 set_listen_mask(f, listeners);
1457 break;
1458
1459 case ODP_GET_SFLOW_PROBABILITY:
1460 err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
1461 break;
1462
1463 case ODP_SET_SFLOW_PROBABILITY:
1464 err = get_user(sflow_probability, (unsigned int __user *)argp);
1465 if (!err)
1466 dp->sflow_probability = sflow_probability;
1467 break;
1468
1469 case ODP_VPORT_QUERY:
1470 err = query_port(dp, (struct odp_port __user *)argp);
1471 break;
1472
1473 case ODP_VPORT_LIST:
1474 err = list_ports(dp, (struct odp_portvec __user *)argp);
1475 break;
1476
1477 case ODP_FLOW_FLUSH:
1478 err = flush_flows(dp);
1479 break;
1480
1481 case ODP_FLOW_PUT:
1482 err = put_flow(dp, (struct odp_flow_put __user *)argp);
1483 break;
1484
1485 case ODP_FLOW_DEL:
1486 err = del_flow(dp, (struct odp_flow __user *)argp);
1487 break;
1488
1489 case ODP_FLOW_GET:
1490 err = do_flowvec_ioctl(dp, argp, do_query_flows);
1491 break;
1492
1493 case ODP_FLOW_LIST:
1494 err = do_flowvec_ioctl(dp, argp, do_list_flows);
1495 break;
1496
1497 case ODP_EXECUTE:
1498 err = execute_packet(dp, (struct odp_execute __user *)argp);
1499 break;
1500
1501 default:
1502 err = -ENOIOCTLCMD;
1503 break;
1504 }
1505 mutex_unlock(&dp->mutex);
1506 exit:
1507 return err;
1508 }
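/*
 * Typical control sequence from userspace (illustrative sketch only: the
 * device node path is an assumption, the minor number selects dp_idx, and
 * error handling is omitted):
 *
 *	int fd = open("/dev/openvswitch0", O_RDONLY);	// minor == dp_idx
 *	ioctl(fd, ODP_DP_CREATE, "br0");		// datapath plus local port
 *	struct odp_port port = { .devname = "eth0", .type = "netdev" };
 *	ioctl(fd, ODP_VPORT_ATTACH, &port);		// port.port receives the number
 *	int mask = ODPL_ALL;
 *	ioctl(fd, ODP_SET_LISTEN_MASK, &mask);		// then read() for queued packets
 */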
1509
1510 static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
1511 {
1512 int i;
1513 for (i = 0; i < DP_N_QUEUES; i++) {
1514 if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
1515 return 1;
1516 }
1517 return 0;
1518 }
1519
1520 #ifdef CONFIG_COMPAT
1521 static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
1522 {
1523 struct compat_odp_portvec pv;
1524 int retval;
1525
1526 if (copy_from_user(&pv, upv, sizeof pv))
1527 return -EFAULT;
1528
1529 retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
1530 if (retval < 0)
1531 return retval;
1532
1533 return put_user(retval, &upv->n_ports);
1534 }
1535
1536 static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
1537 {
1538 compat_uptr_t actions;
1539
1540 if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
1541 __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
1542 __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
1543 __get_user(actions, &compat->actions) ||
1544 __get_user(flow->actions_len, &compat->actions_len) ||
1545 __get_user(flow->flags, &compat->flags))
1546 return -EFAULT;
1547
1548 flow->actions = (struct nlattr __force *)compat_ptr(actions);
1549 return 0;
1550 }
1551
1552 static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
1553 {
1554 struct odp_flow_stats stats;
1555 struct odp_flow_put fp;
1556 int error;
1557
1558 if (compat_get_flow(&fp.flow, &ufp->flow) ||
1559 get_user(fp.flags, &ufp->flags))
1560 return -EFAULT;
1561
1562 error = do_put_flow(dp, &fp, &stats);
1563 if (error)
1564 return error;
1565
1566 if (copy_to_user(&ufp->flow.stats, &stats,
1567 sizeof(struct odp_flow_stats)))
1568 return -EFAULT;
1569
1570 return 0;
1571 }
1572
1573 static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
1574 u32 query_flags,
1575 struct compat_odp_flow __user *ufp)
1576 {
1577 compat_uptr_t actions;
1578
1579 if (get_user(actions, &ufp->actions))
1580 return -EFAULT;
1581
1582 return do_answer_query(dp, flow, query_flags, &ufp->stats,
1583 compat_ptr(actions), &ufp->actions_len);
1584 }
1585
1586 static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
1587 {
1588 struct sw_flow *flow;
1589 struct odp_flow uf;
1590 int error;
1591
1592 if (compat_get_flow(&uf, ufp))
1593 return -EFAULT;
1594
1595 flow = do_del_flow(dp, &uf.key);
1596 if (IS_ERR(flow))
1597 return PTR_ERR(flow);
1598
1599 error = compat_answer_query(dp, flow, 0, ufp);
1600 flow_deferred_free(flow);
1601 return error;
1602 }
1603
1604 static int compat_query_flows(struct datapath *dp,
1605 struct compat_odp_flow __user *flows,
1606 u32 n_flows)
1607 {
1608 struct tbl *table = get_table_protected(dp);
1609 u32 i;
1610
1611 for (i = 0; i < n_flows; i++) {
1612 struct compat_odp_flow __user *ufp = &flows[i];
1613 struct odp_flow uf;
1614 struct tbl_node *flow_node;
1615 int error;
1616
1617 if (compat_get_flow(&uf, ufp))
1618 return -EFAULT;
1619
1620 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1621 if (!flow_node)
1622 error = put_user(ENOENT, &ufp->stats.error);
1623 else
1624 error = compat_answer_query(dp, flow_cast(flow_node),
1625 uf.flags, ufp);
1626 if (error)
1627 return -EFAULT;
1628 }
1629 return n_flows;
1630 }
1631
1632 struct compat_list_flows_cbdata {
1633 struct datapath *dp;
1634 struct compat_odp_flow __user *uflows;
1635 u32 n_flows;
1636 u32 listed_flows;
1637 };
1638
1639 static int compat_list_flow(struct tbl_node *node, void *cbdata_)
1640 {
1641 struct sw_flow *flow = flow_cast(node);
1642 struct compat_list_flows_cbdata *cbdata = cbdata_;
1643 struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1644 int error;
1645
1646 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1647 return -EFAULT;
1648 error = compat_answer_query(cbdata->dp, flow, 0, ufp);
1649 if (error)
1650 return error;
1651
1652 if (cbdata->listed_flows >= cbdata->n_flows)
1653 return cbdata->listed_flows;
1654 return 0;
1655 }
1656
1657 static int compat_list_flows(struct datapath *dp,
1658 struct compat_odp_flow __user *flows, u32 n_flows)
1659 {
1660 struct compat_list_flows_cbdata cbdata;
1661 int error;
1662
1663 if (!n_flows)
1664 return 0;
1665
1666 cbdata.dp = dp;
1667 cbdata.uflows = flows;
1668 cbdata.n_flows = n_flows;
1669 cbdata.listed_flows = 0;
1670
1671 error = tbl_foreach(get_table_protected(dp), compat_list_flow, &cbdata);
1672 return error ? error : cbdata.listed_flows;
1673 }
1674
1675 static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1676 int (*function)(struct datapath *,
1677 struct compat_odp_flow __user *,
1678 u32 n_flows))
1679 {
1680 struct compat_odp_flowvec __user *uflowvec;
1681 struct compat_odp_flow __user *flows;
1682 struct compat_odp_flowvec flowvec;
1683 int retval;
1684
1685 uflowvec = compat_ptr(argp);
1686 if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
1687 copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1688 return -EFAULT;
1689
1690 if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
1691 return -EINVAL;
1692
1693 flows = compat_ptr(flowvec.flows);
1694 if (!access_ok(VERIFY_WRITE, flows,
1695 flowvec.n_flows * sizeof(struct compat_odp_flow)))
1696 return -EFAULT;
1697
1698 retval = function(dp, flows, flowvec.n_flows);
1699 return (retval < 0 ? retval
1700 : retval == flowvec.n_flows ? 0
1701 : put_user(retval, &uflowvec->n_flows));
1702 }
1703
1704 static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
1705 {
1706 struct odp_execute execute;
1707 compat_uptr_t actions;
1708 compat_uptr_t data;
1709
1710 if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
1711 __get_user(actions, &uexecute->actions) ||
1712 __get_user(execute.actions_len, &uexecute->actions_len) ||
1713 __get_user(data, &uexecute->data) ||
1714 __get_user(execute.length, &uexecute->length))
1715 return -EFAULT;
1716
1717 execute.actions = (struct nlattr __force *)compat_ptr(actions);
1718 execute.data = (const void __force *)compat_ptr(data);
1719
1720 return do_execute(dp, &execute);
1721 }
1722
1723 static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
1724 {
1725 int dp_idx = iminor(f->f_dentry->d_inode);
1726 struct datapath *dp;
1727 int err;
1728
1729 switch (cmd) {
1730 case ODP_DP_DESTROY:
1731 case ODP_FLOW_FLUSH:
1732 /* Ioctls that don't need any translation at all. */
1733 return openvswitch_ioctl(f, cmd, argp);
1734
1735 case ODP_DP_CREATE:
1736 case ODP_VPORT_ATTACH:
1737 case ODP_VPORT_DETACH:
1738 case ODP_VPORT_MOD:
1739 case ODP_VPORT_MTU_SET:
1740 case ODP_VPORT_MTU_GET:
1741 case ODP_VPORT_ETHER_SET:
1742 case ODP_VPORT_ETHER_GET:
1743 case ODP_VPORT_STATS_SET:
1744 case ODP_VPORT_STATS_GET:
1745 case ODP_DP_STATS:
1746 case ODP_GET_DROP_FRAGS:
1747 case ODP_SET_DROP_FRAGS:
1748 case ODP_SET_LISTEN_MASK:
1749 case ODP_GET_LISTEN_MASK:
1750 case ODP_SET_SFLOW_PROBABILITY:
1751 case ODP_GET_SFLOW_PROBABILITY:
1752 case ODP_VPORT_QUERY:
1753 /* Ioctls that just need their pointer argument extended. */
1754 return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
1755 }
1756
1757 dp = get_dp_locked(dp_idx);
1758 err = -ENODEV;
1759 if (!dp)
1760 goto exit;
1761
1762 switch (cmd) {
1763 case ODP_VPORT_LIST32:
1764 err = compat_list_ports(dp, compat_ptr(argp));
1765 break;
1766
1767 case ODP_FLOW_PUT32:
1768 err = compat_put_flow(dp, compat_ptr(argp));
1769 break;
1770
1771 case ODP_FLOW_DEL32:
1772 err = compat_del_flow(dp, compat_ptr(argp));
1773 break;
1774
1775 case ODP_FLOW_GET32:
1776 err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
1777 break;
1778
1779 case ODP_FLOW_LIST32:
1780 err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
1781 break;
1782
1783 case ODP_EXECUTE32:
1784 err = compat_execute(dp, compat_ptr(argp));
1785 break;
1786
1787 default:
1788 err = -ENOIOCTLCMD;
1789 break;
1790 }
1791 mutex_unlock(&dp->mutex);
1792 exit:
1793 return err;
1794 }
1795 #endif
1796
1797 /* Unfortunately this function is not exported so this is a verbatim copy
1798 * from net/core/datagram.c in 2.6.30. */
1799 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
1800 u8 __user *to, int len,
1801 __wsum *csump)
1802 {
1803 int start = skb_headlen(skb);
1804 int pos = 0;
1805 int i, copy = start - offset;
1806
1807 /* Copy header. */
1808 if (copy > 0) {
1809 int err = 0;
1810 if (copy > len)
1811 copy = len;
1812 *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
1813 *csump, &err);
1814 if (err)
1815 goto fault;
1816 if ((len -= copy) == 0)
1817 return 0;
1818 offset += copy;
1819 to += copy;
1820 pos = copy;
1821 }
1822
1823 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1824 int end;
1825
1826 WARN_ON(start > offset + len);
1827
1828 end = start + skb_shinfo(skb)->frags[i].size;
1829 if ((copy = end - offset) > 0) {
1830 __wsum csum2;
1831 int err = 0;
1832 u8 *vaddr;
1833 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1834 struct page *page = frag->page;
1835
1836 if (copy > len)
1837 copy = len;
1838 vaddr = kmap(page);
1839 csum2 = csum_and_copy_to_user(vaddr +
1840 frag->page_offset +
1841 offset - start,
1842 to, copy, 0, &err);
1843 kunmap(page);
1844 if (err)
1845 goto fault;
1846 *csump = csum_block_add(*csump, csum2, pos);
1847 if (!(len -= copy))
1848 return 0;
1849 offset += copy;
1850 to += copy;
1851 pos += copy;
1852 }
1853 start = end;
1854 }
1855
1856 if (skb_shinfo(skb)->frag_list) {
1857 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1858
1859 for (; list; list=list->next) {
1860 int end;
1861
1862 WARN_ON(start > offset + len);
1863
1864 end = start + list->len;
1865 if ((copy = end - offset) > 0) {
1866 __wsum csum2 = 0;
1867 if (copy > len)
1868 copy = len;
1869 if (skb_copy_and_csum_datagram(list,
1870 offset - start,
1871 to, copy,
1872 &csum2))
1873 goto fault;
1874 *csump = csum_block_add(*csump, csum2, pos);
1875 if ((len -= copy) == 0)
1876 return 0;
1877 offset += copy;
1878 to += copy;
1879 pos += copy;
1880 }
1881 start = end;
1882 }
1883 }
1884 if (!len)
1885 return 0;
1886
1887 fault:
1888 return -EFAULT;
1889 }
1890
1891 static ssize_t openvswitch_read(struct file *f, char __user *buf,
1892 size_t nbytes, loff_t *ppos)
1893 {
1894 int listeners = get_listen_mask(f);
1895 int dp_idx = iminor(f->f_dentry->d_inode);
1896 struct datapath *dp = get_dp_locked(dp_idx);
1897 struct sk_buff *skb;
1898 size_t copy_bytes, tot_copy_bytes;
1899 int retval;
1900
1901 if (!dp)
1902 return -ENODEV;
1903
1904 if (nbytes == 0 || !listeners)
1905 return 0;
1906
1907 for (;;) {
1908 int i;
1909
1910 for (i = 0; i < DP_N_QUEUES; i++) {
1911 if (listeners & (1 << i)) {
1912 skb = skb_dequeue(&dp->queues[i]);
1913 if (skb)
1914 goto success;
1915 }
1916 }
1917
1918 if (f->f_flags & O_NONBLOCK) {
1919 retval = -EAGAIN;
1920 goto error;
1921 }
1922
1923 wait_event_interruptible(dp->waitqueue,
1924 dp_has_packet_of_interest(dp,
1925 listeners));
1926
1927 if (signal_pending(current)) {
1928 retval = -ERESTARTSYS;
1929 goto error;
1930 }
1931 }
1932 success:
1933 mutex_unlock(&dp->mutex);
1934
1935 copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
1936
1937 retval = 0;
1938 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1939 if (copy_bytes == skb->len) {
1940 __wsum csum = 0;
1941 u16 csum_start, csum_offset;
1942
1943 get_skb_csum_pointers(skb, &csum_start, &csum_offset);
1944 csum_start -= skb_headroom(skb);
1945
1946 BUG_ON(csum_start >= skb_headlen(skb));
1947 retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
1948 copy_bytes - csum_start, &csum);
1949 if (!retval) {
1950 __sum16 __user *csump;
1951
1952 copy_bytes = csum_start;
1953 csump = (__sum16 __user *)(buf + csum_start + csum_offset);
1954
1955 BUG_ON((char __user *)csump + sizeof(__sum16) >
1956 buf + nbytes);
1957 put_user(csum_fold(csum), csump);
1958 }
1959 } else
1960 retval = skb_checksum_help(skb);
1961 }
1962
1963 if (!retval) {
1964 struct iovec iov;
1965
1966 iov.iov_base = buf;
1967 iov.iov_len = copy_bytes;
1968 retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
1969 }
1970
1971 if (!retval)
1972 retval = tot_copy_bytes;
1973
1974 kfree_skb(skb);
1975 return retval;
1976
1977 error:
1978 mutex_unlock(&dp->mutex);
1979 return retval;
1980 }
1981
1982 static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
1983 {
1984 int dp_idx = iminor(file->f_dentry->d_inode);
1985 struct datapath *dp = get_dp_locked(dp_idx);
1986 unsigned int mask;
1987
1988 if (dp) {
1989 mask = 0;
1990 poll_wait(file, &dp->waitqueue, wait);
1991 if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
1992 mask |= POLLIN | POLLRDNORM;
1993 mutex_unlock(&dp->mutex);
1994 } else {
1995 mask = POLLIN | POLLRDNORM | POLLHUP;
1996 }
1997 return mask;
1998 }
1999
2000 static struct file_operations openvswitch_fops = {
2001 .read = openvswitch_read,
2002 .poll = openvswitch_poll,
2003 .unlocked_ioctl = openvswitch_ioctl,
2004 #ifdef CONFIG_COMPAT
2005 .compat_ioctl = openvswitch_compat_ioctl,
2006 #endif
2007 };
2008
2009 static int major;
2010
2011 static int __init dp_init(void)
2012 {
2013 struct sk_buff *dummy_skb;
2014 int err;
2015
2016 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2017
2018 printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2019
2020 err = flow_init();
2021 if (err)
2022 goto error;
2023
2024 err = vport_init();
2025 if (err)
2026 goto error_flow_exit;
2027
2028 err = register_netdevice_notifier(&dp_device_notifier);
2029 if (err)
2030 goto error_vport_exit;
2031
2032 err = major = register_chrdev(0, "openvswitch", &openvswitch_fops);
2033 if (err < 0)
2034 goto error_unreg_notifier;
2035
2036 return 0;
2037
2038 error_unreg_notifier:
2039 unregister_netdevice_notifier(&dp_device_notifier);
2040 error_vport_exit:
2041 vport_exit();
2042 error_flow_exit:
2043 flow_exit();
2044 error:
2045 return err;
2046 }
2047
2048 static void dp_cleanup(void)
2049 {
2050 rcu_barrier();
2051 unregister_chrdev(major, "openvswitch");
2052 unregister_netdevice_notifier(&dp_device_notifier);
2053 vport_exit();
2054 flow_exit();
2055 }
2056
2057 module_init(dp_init);
2058 module_exit(dp_cleanup);
2059
2060 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2061 MODULE_LICENSE("GPL");