1 /*
2 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
4 *
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
7 */
8
9 /* Functions for managing the dp interface/device. */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/fs.h>
16 #include <linux/if_arp.h>
17 #include <linux/if_vlan.h>
18 #include <linux/in.h>
19 #include <linux/ip.h>
20 #include <linux/delay.h>
21 #include <linux/time.h>
22 #include <linux/etherdevice.h>
23 #include <linux/kernel.h>
24 #include <linux/kthread.h>
25 #include <linux/mutex.h>
26 #include <linux/percpu.h>
27 #include <linux/rcupdate.h>
28 #include <linux/tcp.h>
29 #include <linux/udp.h>
30 #include <linux/version.h>
31 #include <linux/ethtool.h>
32 #include <linux/wait.h>
33 #include <asm/system.h>
34 #include <asm/div64.h>
35 #include <asm/bug.h>
36 #include <linux/highmem.h>
37 #include <linux/netfilter_bridge.h>
38 #include <linux/netfilter_ipv4.h>
39 #include <linux/inetdevice.h>
40 #include <linux/list.h>
41 #include <linux/rculist.h>
42 #include <linux/dmi.h>
43 #include <net/inet_ecn.h>
44 #include <linux/compat.h>
45
46 #include "openvswitch/datapath-protocol.h"
47 #include "checksum.h"
48 #include "datapath.h"
49 #include "actions.h"
50 #include "flow.h"
51 #include "loop_counter.h"
52 #include "odp-compat.h"
53 #include "table.h"
54 #include "vport-internal_dev.h"
55
56 int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
57 EXPORT_SYMBOL(dp_ioctl_hook);
58
59 /* Datapaths. Protected on the read side by rcu_read_lock, on the write side
60 * by dp_mutex.
61 *
62 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
63 * lock first.
64 *
65 * It is safe to access the datapath and vport structures with just
66 * dp_mutex.
67 */
68 static struct datapath __rcu *dps[ODP_MAX];
69 static DEFINE_MUTEX(dp_mutex);
70
71 static int new_vport(struct datapath *, struct odp_port *, int port_no);
72
73 /* Must be called with rcu_read_lock or dp_mutex. */
74 struct datapath *get_dp(int dp_idx)
75 {
76 if (dp_idx < 0 || dp_idx >= ODP_MAX)
77 return NULL;
78 return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
79 lockdep_is_held(&dp_mutex));
80 }
81 EXPORT_SYMBOL_GPL(get_dp);
82
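/* Looks up datapath 'dp_idx' and returns it with dp->mutex held, or returns
 * NULL if no such datapath exists.  The caller must release dp->mutex. */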
83 static struct datapath *get_dp_locked(int dp_idx)
84 {
85 struct datapath *dp;
86
87 mutex_lock(&dp_mutex);
88 dp = get_dp(dp_idx);
89 if (dp)
90 mutex_lock(&dp->mutex);
91 mutex_unlock(&dp_mutex);
92 return dp;
93 }
94
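/* Accessors for RCU-protected pointers, for callers that hold dp->mutex
 * rather than rcu_read_lock. */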
95 static struct tbl *get_table_protected(struct datapath *dp)
96 {
97 return rcu_dereference_protected(dp->table,
98 lockdep_is_held(&dp->mutex));
99 }
100
101 static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
102 {
103 return rcu_dereference_protected(dp->ports[port_no],
104 lockdep_is_held(&dp->mutex));
105 }
106
107 /* Must be called with rcu_read_lock or RTNL lock. */
108 const char *dp_name(const struct datapath *dp)
109 {
110 return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
111 }
112
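/* Upper bound on the size of the netlink message built by dp_fill_ifinfo(). */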
113 static inline size_t br_nlmsg_size(void)
114 {
115 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
116 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
117 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
118 + nla_total_size(4) /* IFLA_MASTER */
119 + nla_total_size(4) /* IFLA_MTU */
120 + nla_total_size(4) /* IFLA_LINK */
121 + nla_total_size(1); /* IFLA_OPERSTATE */
122 }
123
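/* Fills 'skb' with an AF_BRIDGE ifinfomsg describing 'port', for an
 * RTM_NEWLINK or RTM_DELLINK notification ('event'). */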
124 static int dp_fill_ifinfo(struct sk_buff *skb,
125 const struct vport *port,
126 int event, unsigned int flags)
127 {
128 struct datapath *dp = port->dp;
129 int ifindex = vport_get_ifindex(port);
130 int iflink = vport_get_iflink(port);
131 struct ifinfomsg *hdr;
132 struct nlmsghdr *nlh;
133
134 if (ifindex < 0)
135 return ifindex;
136
137 if (iflink < 0)
138 return iflink;
139
140 nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
141 if (nlh == NULL)
142 return -EMSGSIZE;
143
144 hdr = nlmsg_data(nlh);
145 hdr->ifi_family = AF_BRIDGE;
146 hdr->__ifi_pad = 0;
147 hdr->ifi_type = ARPHRD_ETHER;
148 hdr->ifi_index = ifindex;
149 hdr->ifi_flags = vport_get_flags(port);
150 hdr->ifi_change = 0;
151
152 NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
153 NLA_PUT_U32(skb, IFLA_MASTER,
154 vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
155 NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
156 #ifdef IFLA_OPERSTATE
157 NLA_PUT_U8(skb, IFLA_OPERSTATE,
158 vport_is_running(port)
159 ? vport_get_operstate(port)
160 : IF_OPER_DOWN);
161 #endif
162
163 NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
164
165 if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);
167
168 return nlmsg_end(skb, nlh);
169
170 nla_put_failure:
171 nlmsg_cancel(skb, nlh);
172 return -EMSGSIZE;
173 }
174
175 static void dp_ifinfo_notify(int event, struct vport *port)
176 {
177 struct sk_buff *skb;
178 int err = -ENOBUFS;
179
180 skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
181 if (skb == NULL)
182 goto errout;
183
184 err = dp_fill_ifinfo(skb, port, event, 0);
185 if (err < 0) {
186 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
187 WARN_ON(err == -EMSGSIZE);
188 kfree_skb(skb);
189 goto errout;
190 }
191 rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
192 return;
193 errout:
194 if (err < 0)
195 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
196 }
197
198 static void release_dp(struct kobject *kobj)
199 {
200 struct datapath *dp = container_of(kobj, struct datapath, ifobj);
201 kfree(dp);
202 }
203
204 static struct kobj_type dp_ktype = {
205 .release = release_dp
206 };
207
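/* Creates datapath number 'dp_idx' along with its ODPP_LOCAL internal port,
 * named after the user-supplied 'devnamep' or "of<dp_idx>" by default.
 * Called with no locks held; takes RTNL, dp_mutex and the new datapath's
 * mutex internally. */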
208 static int create_dp(int dp_idx, const char __user *devnamep)
209 {
210 struct odp_port internal_dev_port;
211 char devname[IFNAMSIZ];
212 struct datapath *dp;
213 int err;
214 int i;
215
216 if (devnamep) {
217 int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
218 if (retval < 0) {
219 err = -EFAULT;
220 goto err;
221 } else if (retval >= IFNAMSIZ) {
222 err = -ENAMETOOLONG;
223 goto err;
224 }
225 } else {
226 snprintf(devname, sizeof devname, "of%d", dp_idx);
227 }
228
229 rtnl_lock();
230 mutex_lock(&dp_mutex);
231 err = -ENODEV;
232 if (!try_module_get(THIS_MODULE))
233 goto err_unlock;
234
235 /* Exit early if a datapath with that number already exists.
236 * (We don't use -EEXIST because that's ambiguous with 'devname'
237 * conflicting with an existing network device name.) */
238 err = -EBUSY;
239 if (get_dp(dp_idx))
240 goto err_put_module;
241
242 err = -ENOMEM;
243 dp = kzalloc(sizeof *dp, GFP_KERNEL);
244 if (dp == NULL)
245 goto err_put_module;
246 INIT_LIST_HEAD(&dp->port_list);
247 mutex_init(&dp->mutex);
248 mutex_lock(&dp->mutex);
249 dp->dp_idx = dp_idx;
250 for (i = 0; i < DP_N_QUEUES; i++)
251 skb_queue_head_init(&dp->queues[i]);
252 init_waitqueue_head(&dp->waitqueue);
253
254 /* Initialize kobject for bridge. This will be added as
255 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
256 dp->ifobj.kset = NULL;
257 kobject_init(&dp->ifobj, &dp_ktype);
258
259 /* Allocate table. */
260 err = -ENOMEM;
261 rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
262 if (!dp->table)
263 goto err_free_dp;
264
265 /* Set up our datapath device. */
266 BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
267 strcpy(internal_dev_port.devname, devname);
268 strcpy(internal_dev_port.type, "internal");
269 err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
270 if (err) {
271 if (err == -EBUSY)
272 err = -EEXIST;
273
274 goto err_destroy_table;
275 }
276
277 dp->drop_frags = 0;
278 dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
279 if (!dp->stats_percpu) {
280 err = -ENOMEM;
281 goto err_destroy_local_port;
282 }
283
284 rcu_assign_pointer(dps[dp_idx], dp);
285 dp_sysfs_add_dp(dp);
286
287 mutex_unlock(&dp->mutex);
288 mutex_unlock(&dp_mutex);
289 rtnl_unlock();
290
291 return 0;
292
293 err_destroy_local_port:
294 dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
295 err_destroy_table:
296 tbl_destroy(get_table_protected(dp), NULL);
297 err_free_dp:
298 mutex_unlock(&dp->mutex);
299 kfree(dp);
300 err_put_module:
301 module_put(THIS_MODULE);
302 err_unlock:
303 mutex_unlock(&dp_mutex);
304 rtnl_unlock();
305 err:
306 return err;
307 }
308
309 static void destroy_dp_rcu(struct rcu_head *rcu)
310 {
311 struct datapath *dp = container_of(rcu, struct datapath, rcu);
312 int i;
313
314 for (i = 0; i < DP_N_QUEUES; i++)
315 skb_queue_purge(&dp->queues[i]);
316
317 tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
318 free_percpu(dp->stats_percpu);
319 kobject_put(&dp->ifobj);
320 }
321
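/* Tears down datapath 'dp_idx': detaches all of its ports, unpublishes it
 * from the 'dps' array, and frees it after an RCU grace period. */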
322 static int destroy_dp(int dp_idx)
323 {
324 struct datapath *dp;
325 int err = 0;
326 struct vport *p, *n;
327
328 rtnl_lock();
329 mutex_lock(&dp_mutex);
330 dp = get_dp(dp_idx);
331 if (!dp) {
332 err = -ENODEV;
333 goto out;
334 }
335
336 mutex_lock(&dp->mutex);
337
338 list_for_each_entry_safe (p, n, &dp->port_list, node)
339 if (p->port_no != ODPP_LOCAL)
340 dp_detach_port(p);
341
342 dp_sysfs_del_dp(dp);
343 rcu_assign_pointer(dps[dp->dp_idx], NULL);
344 dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
345
346 mutex_unlock(&dp->mutex);
347 call_rcu(&dp->rcu, destroy_dp_rcu);
348 module_put(THIS_MODULE);
349
350 out:
351 mutex_unlock(&dp_mutex);
352 rtnl_unlock();
353 return err;
354 }
355
356 /* Called with RTNL lock and dp->mutex. */
357 static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
358 {
359 struct vport_parms parms;
360 struct vport *vport;
361
362 parms.name = odp_port->devname;
363 parms.type = odp_port->type;
364 parms.config = odp_port->config;
365 parms.dp = dp;
366 parms.port_no = port_no;
367
368 vport_lock();
369 vport = vport_add(&parms);
370 vport_unlock();
371
372 if (IS_ERR(vport))
373 return PTR_ERR(vport);
374
375 rcu_assign_pointer(dp->ports[port_no], vport);
376 list_add_rcu(&vport->node, &dp->port_list);
377 dp->n_ports++;
378
379 dp_ifinfo_notify(RTM_NEWLINK, vport);
380
381 return 0;
382 }
383
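/* Handler for ODP_VPORT_ATTACH: creates a vport from the user-supplied
 * odp_port on the lowest free port number and copies that number back to
 * userspace. */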
384 static int attach_port(int dp_idx, struct odp_port __user *portp)
385 {
386 struct datapath *dp;
387 struct odp_port port;
388 int port_no;
389 int err;
390
391 err = -EFAULT;
392 if (copy_from_user(&port, portp, sizeof port))
393 goto out;
394 port.devname[IFNAMSIZ - 1] = '\0';
395 port.type[VPORT_TYPE_SIZE - 1] = '\0';
396
397 rtnl_lock();
398 dp = get_dp_locked(dp_idx);
399 err = -ENODEV;
400 if (!dp)
401 goto out_unlock_rtnl;
402
403 for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
404 if (!dp->ports[port_no])
405 goto got_port_no;
406 err = -EFBIG;
407 goto out_unlock_dp;
408
409 got_port_no:
410 err = new_vport(dp, &port, port_no);
411 if (err)
412 goto out_unlock_dp;
413
414 set_internal_devs_mtu(dp);
415 dp_sysfs_add_if(get_vport_protected(dp, port_no));
416
417 err = put_user(port_no, &portp->port);
418
419 out_unlock_dp:
420 mutex_unlock(&dp->mutex);
421 out_unlock_rtnl:
422 rtnl_unlock();
423 out:
424 return err;
425 }
426
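/* Detaches 'p' from its datapath and destroys it.  Must be called with
 * RTNL lock. */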
427 int dp_detach_port(struct vport *p)
428 {
429 int err;
430
431 ASSERT_RTNL();
432
433 if (p->port_no != ODPP_LOCAL)
434 dp_sysfs_del_if(p);
435 dp_ifinfo_notify(RTM_DELLINK, p);
436
437 /* First drop references to device. */
438 p->dp->n_ports--;
439 list_del_rcu(&p->node);
440 rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
441
442 /* Then destroy it. */
443 vport_lock();
444 err = vport_del(p);
445 vport_unlock();
446
447 return err;
448 }
449
450 static int detach_port(int dp_idx, int port_no)
451 {
452 struct vport *p;
453 struct datapath *dp;
454 int err;
455
456 err = -EINVAL;
457 if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
458 goto out;
459
460 rtnl_lock();
461 dp = get_dp_locked(dp_idx);
462 err = -ENODEV;
463 if (!dp)
464 goto out_unlock_rtnl;
465
466 p = get_vport_protected(dp, port_no);
467 err = -ENOENT;
468 if (!p)
469 goto out_unlock_dp;
470
471 err = dp_detach_port(p);
472
473 out_unlock_dp:
474 mutex_unlock(&dp->mutex);
475 out_unlock_rtnl:
476 rtnl_unlock();
477 out:
478 return err;
479 }
480
481 /* Must be called with rcu_read_lock. */
482 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
483 {
484 struct datapath *dp = p->dp;
485 struct dp_stats_percpu *stats;
486 int stats_counter_off;
487 struct sw_flow_actions *acts;
488 struct loop_counter *loop;
489 int error;
490
491 OVS_CB(skb)->vport = p;
492
493 if (!OVS_CB(skb)->flow) {
494 struct odp_flow_key key;
495 struct tbl_node *flow_node;
496 bool is_frag;
497
498 /* Extract flow from 'skb' into 'key'. */
499 error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
500 if (unlikely(error)) {
501 kfree_skb(skb);
502 return;
503 }
504
505 if (is_frag && dp->drop_frags) {
506 kfree_skb(skb);
507 stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
508 goto out;
509 }
510
511 /* Look up flow. */
512 flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
513 flow_hash(&key), flow_cmp);
514 if (unlikely(!flow_node)) {
515 dp_output_control(dp, skb, _ODPL_MISS_NR,
516 (__force u64)OVS_CB(skb)->tun_id);
517 stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
518 goto out;
519 }
520
521 OVS_CB(skb)->flow = flow_cast(flow_node);
522 }
523
524 stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
525 flow_used(OVS_CB(skb)->flow, skb);
526
527 acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
528
529 /* Check whether we've looped too much. */
530 loop = loop_get_counter();
531 if (unlikely(++loop->count > MAX_LOOPS))
532 loop->looping = true;
533 if (unlikely(loop->looping)) {
534 loop_suppress(dp, acts);
535 kfree_skb(skb);
536 goto out_loop;
537 }
538
539 /* Execute actions. */
540 execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
541 acts->actions_len);
542
543 /* Check whether sub-actions looped too much. */
544 if (unlikely(loop->looping))
545 loop_suppress(dp, acts);
546
547 out_loop:
548 /* Decrement loop counter. */
549 if (!--loop->count)
550 loop->looping = false;
551 loop_put_counter();
552
553 out:
554 /* Update datapath statistics. */
555 local_bh_disable();
556 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
557
558 write_seqcount_begin(&stats->seqlock);
559 (*(u64 *)((u8 *)stats + stats_counter_off))++;
560 write_seqcount_end(&stats->seqlock);
561
562 local_bh_enable();
563 }
564
565 /* Append each packet in 'skb' list to 'queue'. There will be only one packet
566 * unless we broke up a GSO packet. */
567 static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
568 int queue_no, u64 arg)
569 {
570 struct sk_buff *nskb;
571 int port_no;
572 int err;
573
574 if (OVS_CB(skb)->vport)
575 port_no = OVS_CB(skb)->vport->port_no;
576 else
577 port_no = ODPP_LOCAL;
578
579 do {
580 struct odp_msg *header;
581
582 nskb = skb->next;
583 skb->next = NULL;
584
585 err = skb_cow(skb, sizeof *header);
586 if (err)
587 goto err_kfree_skbs;
588
589 header = (struct odp_msg*)__skb_push(skb, sizeof *header);
590 header->type = queue_no;
591 header->length = skb->len;
592 header->port = port_no;
593 header->arg = arg;
594 skb_queue_tail(queue, skb);
595
596 skb = nskb;
597 } while (skb);
598 return 0;
599
600 err_kfree_skbs:
601 kfree_skb(skb);
602 while ((skb = nskb) != NULL) {
603 nskb = skb->next;
604 kfree_skb(skb);
605 }
606 return err;
607 }
608
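/* Queues 'skb' (or, for a GSO packet, each of its segments) on userspace
 * queue 'queue_no', prefixing every packet with a struct odp_msg header
 * whose 'arg' member is set to 'arg', then wakes up any blocked reader.
 * Consumes 'skb'. */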
609 int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
610 u64 arg)
611 {
612 struct dp_stats_percpu *stats;
613 struct sk_buff_head *queue;
614 int err;
615
616 WARN_ON_ONCE(skb_shared(skb));
617 BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
618 queue = &dp->queues[queue_no];
619 err = -ENOBUFS;
620 if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
621 goto err_kfree_skb;
622
623 forward_ip_summed(skb);
624
625 err = vswitch_skb_checksum_setup(skb);
626 if (err)
627 goto err_kfree_skb;
628
629 /* Break apart GSO packets into their component pieces. Otherwise
630 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
631 if (skb_is_gso(skb)) {
632 struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
633
634 kfree_skb(skb);
635 skb = nskb;
636 if (IS_ERR(skb)) {
637 err = PTR_ERR(skb);
638 goto err;
639 }
640 }
641
642 err = queue_control_packets(skb, queue, queue_no, arg);
643 wake_up_interruptible(&dp->waitqueue);
644 return err;
645
646 err_kfree_skb:
647 kfree_skb(skb);
648 err:
649 local_bh_disable();
650 stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
651
652 write_seqcount_begin(&stats->seqlock);
653 stats->n_lost++;
654 write_seqcount_end(&stats->seqlock);
655
656 local_bh_enable();
657
658 return err;
659 }
660
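/* Replaces the datapath's flow table with a new, empty one.  The old table
 * and its flows are freed after an RCU grace period. */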
661 static int flush_flows(struct datapath *dp)
662 {
663 struct tbl *old_table = get_table_protected(dp);
664 struct tbl *new_table;
665
666 new_table = tbl_create(TBL_MIN_BUCKETS);
667 if (!new_table)
668 return -ENOMEM;
669
670 rcu_assign_pointer(dp->table, new_table);
671
672 tbl_deferred_destroy(old_table, flow_free_tbl);
673
674 return 0;
675 }
676
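/* Verifies that 'actions' is a well-formed list of ODPAT_* attributes: each
 * action must have a known type, the expected payload length and, where
 * applicable, a sane value. */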
677 static int validate_actions(const struct nlattr *actions, u32 actions_len)
678 {
679 const struct nlattr *a;
680 int rem;
681
682 nla_for_each_attr(a, actions, actions_len, rem) {
683 static const u32 action_lens[ODPAT_MAX + 1] = {
684 [ODPAT_OUTPUT] = 4,
685 [ODPAT_CONTROLLER] = 8,
686 [ODPAT_SET_DL_TCI] = 2,
687 [ODPAT_STRIP_VLAN] = 0,
688 [ODPAT_SET_DL_SRC] = ETH_ALEN,
689 [ODPAT_SET_DL_DST] = ETH_ALEN,
690 [ODPAT_SET_NW_SRC] = 4,
691 [ODPAT_SET_NW_DST] = 4,
692 [ODPAT_SET_NW_TOS] = 1,
693 [ODPAT_SET_TP_SRC] = 2,
694 [ODPAT_SET_TP_DST] = 2,
695 [ODPAT_SET_TUNNEL] = 8,
696 [ODPAT_SET_PRIORITY] = 4,
697 [ODPAT_POP_PRIORITY] = 0,
698 [ODPAT_DROP_SPOOFED_ARP] = 0,
699 };
700 int type = nla_type(a);
701
702 if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
703 return -EINVAL;
704
705 switch (type) {
706 case ODPAT_UNSPEC:
707 return -EINVAL;
708
709 case ODPAT_CONTROLLER:
710 case ODPAT_STRIP_VLAN:
711 case ODPAT_SET_DL_SRC:
712 case ODPAT_SET_DL_DST:
713 case ODPAT_SET_NW_SRC:
714 case ODPAT_SET_NW_DST:
715 case ODPAT_SET_TP_SRC:
716 case ODPAT_SET_TP_DST:
717 case ODPAT_SET_TUNNEL:
718 case ODPAT_SET_PRIORITY:
719 case ODPAT_POP_PRIORITY:
720 case ODPAT_DROP_SPOOFED_ARP:
721 /* No validation needed. */
722 break;
723
		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

728 case ODPAT_SET_DL_TCI:
729 if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
730 return -EINVAL;
731 break;
732
733 case ODPAT_SET_NW_TOS:
734 if (nla_get_u8(a) & INET_ECN_MASK)
735 return -EINVAL;
736 break;
737
738 default:
739 return -EOPNOTSUPP;
740 }
741 }
742
743 if (rem > 0)
744 return -EINVAL;
745
746 return 0;
747 }
748
749 static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
750 {
751 struct sw_flow_actions *actions;
752 int error;
753
754 actions = flow_actions_alloc(flow->actions_len);
755 error = PTR_ERR(actions);
756 if (IS_ERR(actions))
757 goto error;
758
759 error = -EFAULT;
760 if (copy_from_user(actions->actions,
761 (struct nlattr __user __force *)flow->actions,
762 flow->actions_len))
763 goto error_free_actions;
764 error = validate_actions(actions->actions, actions->actions_len);
765 if (error)
766 goto error_free_actions;
767
768 return actions;
769
770 error_free_actions:
771 kfree(actions);
772 error:
773 return ERR_PTR(error);
774 }
775
776 static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
777 {
778 if (flow->used) {
779 struct timespec offset_ts, used, now_mono;
780
781 ktime_get_ts(&now_mono);
782 jiffies_to_timespec(jiffies - flow->used, &offset_ts);
783 set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
784 now_mono.tv_nsec - offset_ts.tv_nsec);
785
786 stats->used_sec = used.tv_sec;
787 stats->used_nsec = used.tv_nsec;
788 } else {
789 stats->used_sec = 0;
790 stats->used_nsec = 0;
791 }
792
793 stats->n_packets = flow->packet_count;
794 stats->n_bytes = flow->byte_count;
795 stats->reserved = 0;
796 stats->tcp_flags = flow->tcp_flags;
797 stats->error = 0;
798 }
799
800 static void clear_stats(struct sw_flow *flow)
801 {
802 flow->used = 0;
803 flow->tcp_flags = 0;
804 flow->packet_count = 0;
805 flow->byte_count = 0;
806 }
807
808 static int expand_table(struct datapath *dp)
809 {
810 struct tbl *old_table = get_table_protected(dp);
811 struct tbl *new_table;
812
813 new_table = tbl_expand(old_table);
814 if (IS_ERR(new_table))
815 return PTR_ERR(new_table);
816
817 rcu_assign_pointer(dp->table, new_table);
818 tbl_deferred_destroy(old_table, NULL);
819
820 return 0;
821 }
822
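/* Does the work of ODP_FLOW_PUT: inserts a new flow (ODPPF_CREATE) or
 * replaces the actions of an existing one (ODPPF_MODIFY), returning the
 * flow's statistics (zeroed for a newly created flow) in 'stats'. */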
823 static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
824 struct odp_flow_stats *stats)
825 {
826 struct tbl_node *flow_node;
827 struct sw_flow *flow;
828 struct tbl *table;
829 struct sw_flow_actions *acts = NULL;
830 int error;
831 u32 hash;
832
833 hash = flow_hash(&uf->flow.key);
834 table = get_table_protected(dp);
835 flow_node = tbl_lookup(table, &uf->flow.key, hash, flow_cmp);
836 if (!flow_node) {
837 /* No such flow. */
838 error = -ENOENT;
839 if (!(uf->flags & ODPPF_CREATE))
840 goto error;
841
842 /* Expand table, if necessary, to make room. */
843 if (tbl_count(table) >= tbl_n_buckets(table)) {
844 error = expand_table(dp);
845 if (error)
846 goto error;
847 table = get_table_protected(dp);
848 }
849
850 /* Allocate flow. */
851 flow = flow_alloc();
852 if (IS_ERR(flow)) {
853 error = PTR_ERR(flow);
854 goto error;
855 }
856 flow->key = uf->flow.key;
857 clear_stats(flow);
858
859 /* Obtain actions. */
860 acts = get_actions(&uf->flow);
861 error = PTR_ERR(acts);
862 if (IS_ERR(acts))
863 goto error_free_flow;
864 rcu_assign_pointer(flow->sf_acts, acts);
865
866 /* Put flow in bucket. */
867 error = tbl_insert(table, &flow->tbl_node, hash);
868 if (error)
869 goto error_free_flow_acts;
870
871 memset(stats, 0, sizeof(struct odp_flow_stats));
872 } else {
873 /* We found a matching flow. */
874 struct sw_flow_actions *old_acts, *new_acts;
875
876 flow = flow_cast(flow_node);
877
878 /* Bail out if we're not allowed to modify an existing flow. */
879 error = -EEXIST;
880 if (!(uf->flags & ODPPF_MODIFY))
881 goto error;
882
883 /* Swap actions. */
884 new_acts = get_actions(&uf->flow);
885 error = PTR_ERR(new_acts);
886 if (IS_ERR(new_acts))
887 goto error;
888
889 old_acts = rcu_dereference_protected(flow->sf_acts,
890 lockdep_is_held(&dp->mutex));
891 if (old_acts->actions_len != new_acts->actions_len ||
892 memcmp(old_acts->actions, new_acts->actions,
893 old_acts->actions_len)) {
894 rcu_assign_pointer(flow->sf_acts, new_acts);
895 flow_deferred_free_acts(old_acts);
896 } else {
897 kfree(new_acts);
898 }
899
900 /* Fetch stats, then clear them if necessary. */
901 spin_lock_bh(&flow->lock);
902 get_stats(flow, stats);
903 if (uf->flags & ODPPF_ZERO_STATS)
904 clear_stats(flow);
905 spin_unlock_bh(&flow->lock);
906 }
907
908 return 0;
909
910 error_free_flow_acts:
911 kfree(acts);
912 error_free_flow:
913 flow->sf_acts = NULL;
914 flow_put(flow);
915 error:
916 return error;
917 }
918
919 static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
920 {
921 struct odp_flow_stats stats;
922 struct odp_flow_put uf;
923 int error;
924
925 if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
926 return -EFAULT;
927
928 error = do_put_flow(dp, &uf, &stats);
929 if (error)
930 return error;
931
932 if (copy_to_user(&ufp->flow.stats, &stats,
933 sizeof(struct odp_flow_stats)))
934 return -EFAULT;
935
936 return 0;
937 }
938
939 static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
940 u32 query_flags,
941 struct odp_flow_stats __user *ustats,
942 struct nlattr __user *actions,
943 u32 __user *actions_lenp)
944 {
945 struct sw_flow_actions *sf_acts;
946 struct odp_flow_stats stats;
947 u32 actions_len;
948
949 spin_lock_bh(&flow->lock);
950 get_stats(flow, &stats);
951 if (query_flags & ODPFF_ZERO_TCP_FLAGS)
952 flow->tcp_flags = 0;
953
954 spin_unlock_bh(&flow->lock);
955
956 if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
957 get_user(actions_len, actions_lenp))
958 return -EFAULT;
959
960 if (!actions_len)
961 return 0;
962
963 sf_acts = rcu_dereference_protected(flow->sf_acts,
964 lockdep_is_held(&dp->mutex));
965 if (put_user(sf_acts->actions_len, actions_lenp) ||
966 (actions && copy_to_user(actions, sf_acts->actions,
967 min(sf_acts->actions_len, actions_len))))
968 return -EFAULT;
969
970 return 0;
971 }
972
973 static int answer_query(struct datapath *dp, struct sw_flow *flow,
974 u32 query_flags, struct odp_flow __user *ufp)
975 {
976 struct nlattr __user *actions;
977
978 if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
979 return -EFAULT;
980
981 return do_answer_query(dp, flow, query_flags,
982 &ufp->stats, actions, &ufp->actions_len);
983 }
984
985 static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
986 {
987 struct tbl *table = get_table_protected(dp);
988 struct tbl_node *flow_node;
989 int error;
990
991 flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
992 if (!flow_node)
993 return ERR_PTR(-ENOENT);
994
995 error = tbl_remove(table, flow_node);
996 if (error)
997 return ERR_PTR(error);
998
999 /* XXX Returned flow_node's statistics might lose a few packets, since
1000 * other CPUs can be using this flow. We used to synchronize_rcu() to
1001 * make sure that we get completely accurate stats, but that blows our
1002 * performance, badly. */
1003 return flow_cast(flow_node);
1004 }
1005
1006 static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
1007 {
1008 struct sw_flow *flow;
1009 struct odp_flow uf;
1010 int error;
1011
1012 if (copy_from_user(&uf, ufp, sizeof uf))
1013 return -EFAULT;
1014
1015 flow = do_del_flow(dp, &uf.key);
1016 if (IS_ERR(flow))
1017 return PTR_ERR(flow);
1018
1019 error = answer_query(dp, flow, 0, ufp);
1020 flow_deferred_free(flow);
1021 return error;
1022 }
1023
1024 static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
1025 {
1026 struct tbl *table = get_table_protected(dp);
1027 u32 i;
1028
1029 for (i = 0; i < flowvec->n_flows; i++) {
1030 struct odp_flow __user *ufp = (struct odp_flow __user __force *)&flowvec->flows[i];
1031 struct odp_flow uf;
1032 struct tbl_node *flow_node;
1033 int error;
1034
1035 if (copy_from_user(&uf, ufp, sizeof uf))
1036 return -EFAULT;
1037
1038 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1039 if (!flow_node)
1040 error = put_user(ENOENT, &ufp->stats.error);
1041 else
1042 error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
1043 if (error)
1044 return -EFAULT;
1045 }
1046 return flowvec->n_flows;
1047 }
1048
1049 struct list_flows_cbdata {
1050 struct datapath *dp;
1051 struct odp_flow __user *uflows;
1052 u32 n_flows;
1053 u32 listed_flows;
1054 };
1055
1056 static int list_flow(struct tbl_node *node, void *cbdata_)
1057 {
1058 struct sw_flow *flow = flow_cast(node);
1059 struct list_flows_cbdata *cbdata = cbdata_;
1060 struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1061 int error;
1062
1063 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1064 return -EFAULT;
1065 error = answer_query(cbdata->dp, flow, 0, ufp);
1066 if (error)
1067 return error;
1068
1069 if (cbdata->listed_flows >= cbdata->n_flows)
1070 return cbdata->listed_flows;
1071 return 0;
1072 }
1073
1074 static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
1075 {
1076 struct list_flows_cbdata cbdata;
1077 int error;
1078
1079 if (!flowvec->n_flows)
1080 return 0;
1081
1082 cbdata.dp = dp;
1083 cbdata.uflows = (struct odp_flow __user __force*)flowvec->flows;
1084 cbdata.n_flows = flowvec->n_flows;
1085 cbdata.listed_flows = 0;
1086
1087 error = tbl_foreach(get_table_protected(dp), list_flow, &cbdata);
1088 return error ? error : cbdata.listed_flows;
1089 }
1090
1091 static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1092 int (*function)(struct datapath *,
1093 const struct odp_flowvec *))
1094 {
1095 struct odp_flowvec __user *uflowvec;
1096 struct odp_flowvec flowvec;
1097 int retval;
1098
1099 uflowvec = (struct odp_flowvec __user *)argp;
1100 if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1101 return -EFAULT;
1102
1103 if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
1104 return -EINVAL;
1105
1106 retval = function(dp, &flowvec);
1107 return (retval < 0 ? retval
1108 : retval == flowvec.n_flows ? 0
1109 : put_user(retval, &uflowvec->n_flows));
1110 }
1111
1112 static int do_execute(struct datapath *dp, const struct odp_execute *execute)
1113 {
1114 struct odp_flow_key key;
1115 struct sk_buff *skb;
1116 struct sw_flow_actions *actions;
1117 struct ethhdr *eth;
1118 bool is_frag;
1119 int err;
1120
1121 err = -EINVAL;
1122 if (execute->length < ETH_HLEN || execute->length > 65535)
1123 goto error;
1124
1125 actions = flow_actions_alloc(execute->actions_len);
1126 if (IS_ERR(actions)) {
1127 err = PTR_ERR(actions);
1128 goto error;
1129 }
1130
1131 err = -EFAULT;
1132 if (copy_from_user(actions->actions,
1133 (struct nlattr __user __force *)execute->actions, execute->actions_len))
1134 goto error_free_actions;
1135
1136 err = validate_actions(actions->actions, execute->actions_len);
1137 if (err)
1138 goto error_free_actions;
1139
1140 err = -ENOMEM;
1141 skb = alloc_skb(execute->length, GFP_KERNEL);
1142 if (!skb)
1143 goto error_free_actions;
1144
1145 err = -EFAULT;
1146 if (copy_from_user(skb_put(skb, execute->length),
1147 (const void __user __force *)execute->data,
1148 execute->length))
1149 goto error_free_skb;
1150
1151 skb_reset_mac_header(skb);
1152 eth = eth_hdr(skb);
1153
1154 /* Normally, setting the skb 'protocol' field would be handled by a
1155 * call to eth_type_trans(), but it assumes there's a sending
1156 * device, which we may not have. */
1157 if (ntohs(eth->h_proto) >= 1536)
1158 skb->protocol = eth->h_proto;
1159 else
1160 skb->protocol = htons(ETH_P_802_2);
1161
1162 err = flow_extract(skb, -1, &key, &is_frag);
1163 if (err)
1164 goto error_free_skb;
1165
1166 rcu_read_lock();
1167 err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
1168 rcu_read_unlock();
1169
1170 kfree(actions);
1171 return err;
1172
1173 error_free_skb:
1174 kfree_skb(skb);
1175 error_free_actions:
1176 kfree(actions);
1177 error:
1178 return err;
1179 }
1180
1181 static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
1182 {
1183 struct odp_execute execute;
1184
1185 if (copy_from_user(&execute, executep, sizeof execute))
1186 return -EFAULT;
1187
1188 return do_execute(dp, &execute);
1189 }
1190
1191 static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
1192 {
1193 struct tbl *table = get_table_protected(dp);
1194 struct odp_stats stats;
1195 int i;
1196
1197 stats.n_flows = tbl_count(table);
1198 stats.cur_capacity = tbl_n_buckets(table);
1199 stats.max_capacity = TBL_MAX_BUCKETS;
1200 stats.n_ports = dp->n_ports;
1201 stats.max_ports = DP_MAX_PORTS;
1202 stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
1203 for_each_possible_cpu(i) {
1204 const struct dp_stats_percpu *percpu_stats;
1205 struct dp_stats_percpu local_stats;
1206 unsigned seqcount;
1207
1208 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
1209
1210 do {
1211 seqcount = read_seqcount_begin(&percpu_stats->seqlock);
1212 local_stats = *percpu_stats;
1213 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
1214
1215 stats.n_frags += local_stats.n_frags;
1216 stats.n_hit += local_stats.n_hit;
1217 stats.n_missed += local_stats.n_missed;
1218 stats.n_lost += local_stats.n_lost;
1219 }
1220 stats.max_miss_queue = DP_MAX_QUEUE_LEN;
1221 stats.max_action_queue = DP_MAX_QUEUE_LEN;
1222 return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
1223 }
1224
1225 /* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
1226 int dp_min_mtu(const struct datapath *dp)
1227 {
1228 struct vport *p;
1229 int mtu = 0;
1230
1231 ASSERT_RTNL();
1232
1233 list_for_each_entry_rcu (p, &dp->port_list, node) {
1234 int dev_mtu;
1235
1236 /* Skip any internal ports, since that's what we're trying to
1237 * set. */
1238 if (is_internal_vport(p))
1239 continue;
1240
1241 dev_mtu = vport_get_mtu(p);
1242 if (!mtu || dev_mtu < mtu)
1243 mtu = dev_mtu;
1244 }
1245
1246 return mtu ? mtu : ETH_DATA_LEN;
1247 }
1248
1249 /* Sets the MTU of all datapath devices to the minimum of the ports. Must
1250 * be called with RTNL lock. */
1251 void set_internal_devs_mtu(const struct datapath *dp)
1252 {
1253 struct vport *p;
1254 int mtu;
1255
1256 ASSERT_RTNL();
1257
1258 mtu = dp_min_mtu(dp);
1259
1260 list_for_each_entry_rcu (p, &dp->port_list, node) {
1261 if (is_internal_vport(p))
1262 vport_set_mtu(p, mtu);
1263 }
1264 }
1265
1266 static int put_port(const struct vport *p, struct odp_port __user *uop)
1267 {
1268 struct odp_port op;
1269
1270 memset(&op, 0, sizeof op);
1271
1272 rcu_read_lock();
1273 strncpy(op.devname, vport_get_name(p), sizeof op.devname);
1274 strncpy(op.type, vport_get_type(p), sizeof op.type);
1275 vport_get_config(p, op.config);
1276 rcu_read_unlock();
1277
1278 op.port = p->port_no;
1279
1280 return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
1281 }
1282
1283 static int query_port(struct datapath *dp, struct odp_port __user *uport)
1284 {
1285 struct odp_port port;
1286 struct vport *vport;
1287
1288 if (copy_from_user(&port, uport, sizeof port))
1289 return -EFAULT;
1290
1291 if (port.devname[0]) {
1292 port.devname[IFNAMSIZ - 1] = '\0';
1293
1294 vport_lock();
1295 vport = vport_locate(port.devname);
1296 vport_unlock();
1297
1298 if (!vport)
1299 return -ENODEV;
1300 if (vport->dp != dp)
1301 return -ENOENT;
1302 } else {
1303 if (port.port >= DP_MAX_PORTS)
1304 return -EINVAL;
1305
1306 vport = get_vport_protected(dp, port.port);
1307 if (!vport)
1308 return -ENOENT;
1309 }
1310
1311 return put_port(vport, uport);
1312 }
1313
1314 static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
1315 int n_ports)
1316 {
1317 int idx = 0;
1318 if (n_ports) {
1319 struct vport *p;
1320
1321 list_for_each_entry_rcu (p, &dp->port_list, node) {
1322 if (put_port(p, &uports[idx]))
1323 return -EFAULT;
			if (++idx >= n_ports)
1325 break;
1326 }
1327 }
1328 return idx;
1329 }
1330
1331 static int list_ports(struct datapath *dp, struct odp_portvec __user *upv)
1332 {
1333 struct odp_portvec pv;
1334 int retval;
1335
1336 if (copy_from_user(&pv, upv, sizeof pv))
1337 return -EFAULT;
1338
1339 retval = do_list_ports(dp, (struct odp_port __user __force *)pv.ports,
1340 pv.n_ports);
1341 if (retval < 0)
1342 return retval;
1343
1344 return put_user(retval, &upv->n_ports);
1345 }
1346
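/* Each open file has a mask of the ODPL_* queues its reader is interested
 * in; it is stored directly in file->private_data. */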
1347 static int get_listen_mask(const struct file *f)
1348 {
1349 return (long)f->private_data;
1350 }
1351
1352 static void set_listen_mask(struct file *f, int listen_mask)
1353 {
1354 f->private_data = (void*)(long)listen_mask;
1355 }
1356
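/* Main ioctl entry point.  The commands handled by the first switch below
 * manage their own locking; every other command runs with dp->mutex held.
 *
 * A rough sketch of how userspace drives these ioctls (illustrative only:
 * the device node path and the "netdev" vport type are assumptions, and
 * error handling is omitted):
 *
 *	int fd = open("/dev/openvswitch/dp0", O_RDWR);
 *	ioctl(fd, ODP_DP_CREATE, "of0");
 *	struct odp_port port = { .devname = "eth1", .type = "netdev" };
 *	ioctl(fd, ODP_VPORT_ATTACH, &port);
 *	ioctl(fd, ODP_SET_LISTEN_MASK, &(int){ ODPL_ALL });
 */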
1357 static long openvswitch_ioctl(struct file *f, unsigned int cmd,
1358 unsigned long argp)
1359 {
1360 int dp_idx = iminor(f->f_dentry->d_inode);
1361 struct datapath *dp;
1362 int drop_frags, listeners, port_no;
1363 unsigned int sflow_probability;
1364 int err;
1365
1366 /* Handle commands with special locking requirements up front. */
1367 switch (cmd) {
1368 case ODP_DP_CREATE:
1369 err = create_dp(dp_idx, (char __user *)argp);
1370 goto exit;
1371
1372 case ODP_DP_DESTROY:
1373 err = destroy_dp(dp_idx);
1374 goto exit;
1375
1376 case ODP_VPORT_ATTACH:
1377 err = attach_port(dp_idx, (struct odp_port __user *)argp);
1378 goto exit;
1379
1380 case ODP_VPORT_DETACH:
1381 err = get_user(port_no, (int __user *)argp);
1382 if (!err)
1383 err = detach_port(dp_idx, port_no);
1384 goto exit;
1385
1386 case ODP_VPORT_MOD:
1387 err = vport_user_mod((struct odp_port __user *)argp);
1388 goto exit;
1389
1390 case ODP_VPORT_STATS_GET:
1391 err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
1392 goto exit;
1393
1394 case ODP_VPORT_STATS_SET:
1395 err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
1396 goto exit;
1397
1398 case ODP_VPORT_ETHER_GET:
1399 err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
1400 goto exit;
1401
1402 case ODP_VPORT_ETHER_SET:
1403 err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
1404 goto exit;
1405
1406 case ODP_VPORT_MTU_GET:
1407 err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
1408 goto exit;
1409
1410 case ODP_VPORT_MTU_SET:
1411 err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
1412 goto exit;
1413 }
1414
1415 dp = get_dp_locked(dp_idx);
1416 err = -ENODEV;
1417 if (!dp)
1418 goto exit;
1419
1420 switch (cmd) {
1421 case ODP_DP_STATS:
1422 err = get_dp_stats(dp, (struct odp_stats __user *)argp);
1423 break;
1424
1425 case ODP_GET_DROP_FRAGS:
1426 err = put_user(dp->drop_frags, (int __user *)argp);
1427 break;
1428
1429 case ODP_SET_DROP_FRAGS:
1430 err = get_user(drop_frags, (int __user *)argp);
1431 if (err)
1432 break;
1433 err = -EINVAL;
1434 if (drop_frags != 0 && drop_frags != 1)
1435 break;
1436 dp->drop_frags = drop_frags;
1437 err = 0;
1438 break;
1439
1440 case ODP_GET_LISTEN_MASK:
1441 err = put_user(get_listen_mask(f), (int __user *)argp);
1442 break;
1443
1444 case ODP_SET_LISTEN_MASK:
1445 err = get_user(listeners, (int __user *)argp);
1446 if (err)
1447 break;
1448 err = -EINVAL;
1449 if (listeners & ~ODPL_ALL)
1450 break;
1451 err = 0;
1452 set_listen_mask(f, listeners);
1453 break;
1454
1455 case ODP_GET_SFLOW_PROBABILITY:
1456 err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
1457 break;
1458
1459 case ODP_SET_SFLOW_PROBABILITY:
1460 err = get_user(sflow_probability, (unsigned int __user *)argp);
1461 if (!err)
1462 dp->sflow_probability = sflow_probability;
1463 break;
1464
1465 case ODP_VPORT_QUERY:
1466 err = query_port(dp, (struct odp_port __user *)argp);
1467 break;
1468
1469 case ODP_VPORT_LIST:
1470 err = list_ports(dp, (struct odp_portvec __user *)argp);
1471 break;
1472
1473 case ODP_FLOW_FLUSH:
1474 err = flush_flows(dp);
1475 break;
1476
1477 case ODP_FLOW_PUT:
1478 err = put_flow(dp, (struct odp_flow_put __user *)argp);
1479 break;
1480
1481 case ODP_FLOW_DEL:
1482 err = del_flow(dp, (struct odp_flow __user *)argp);
1483 break;
1484
1485 case ODP_FLOW_GET:
1486 err = do_flowvec_ioctl(dp, argp, do_query_flows);
1487 break;
1488
1489 case ODP_FLOW_LIST:
1490 err = do_flowvec_ioctl(dp, argp, do_list_flows);
1491 break;
1492
1493 case ODP_EXECUTE:
1494 err = execute_packet(dp, (struct odp_execute __user *)argp);
1495 break;
1496
1497 default:
1498 err = -ENOIOCTLCMD;
1499 break;
1500 }
1501 mutex_unlock(&dp->mutex);
1502 exit:
1503 return err;
1504 }
1505
1506 static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
1507 {
1508 int i;
1509 for (i = 0; i < DP_N_QUEUES; i++) {
1510 if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
1511 return 1;
1512 }
1513 return 0;
1514 }
1515
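/* 32-bit compatibility: the handlers below translate the compat_odp_*
 * structure layouts used by 32-bit userspace into the native ones and then
 * reuse the common helpers above. */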
1516 #ifdef CONFIG_COMPAT
1517 static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
1518 {
1519 struct compat_odp_portvec pv;
1520 int retval;
1521
1522 if (copy_from_user(&pv, upv, sizeof pv))
1523 return -EFAULT;
1524
1525 retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
1526 if (retval < 0)
1527 return retval;
1528
1529 return put_user(retval, &upv->n_ports);
1530 }
1531
1532 static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
1533 {
1534 compat_uptr_t actions;
1535
1536 if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
1537 __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
1538 __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
1539 __get_user(actions, &compat->actions) ||
1540 __get_user(flow->actions_len, &compat->actions_len) ||
1541 __get_user(flow->flags, &compat->flags))
1542 return -EFAULT;
1543
1544 flow->actions = (struct nlattr __force *)compat_ptr(actions);
1545 return 0;
1546 }
1547
1548 static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
1549 {
1550 struct odp_flow_stats stats;
1551 struct odp_flow_put fp;
1552 int error;
1553
1554 if (compat_get_flow(&fp.flow, &ufp->flow) ||
1555 get_user(fp.flags, &ufp->flags))
1556 return -EFAULT;
1557
1558 error = do_put_flow(dp, &fp, &stats);
1559 if (error)
1560 return error;
1561
1562 if (copy_to_user(&ufp->flow.stats, &stats,
1563 sizeof(struct odp_flow_stats)))
1564 return -EFAULT;
1565
1566 return 0;
1567 }
1568
1569 static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
1570 u32 query_flags,
1571 struct compat_odp_flow __user *ufp)
1572 {
1573 compat_uptr_t actions;
1574
1575 if (get_user(actions, &ufp->actions))
1576 return -EFAULT;
1577
1578 return do_answer_query(dp, flow, query_flags, &ufp->stats,
1579 compat_ptr(actions), &ufp->actions_len);
1580 }
1581
1582 static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
1583 {
1584 struct sw_flow *flow;
1585 struct odp_flow uf;
1586 int error;
1587
1588 if (compat_get_flow(&uf, ufp))
1589 return -EFAULT;
1590
1591 flow = do_del_flow(dp, &uf.key);
1592 if (IS_ERR(flow))
1593 return PTR_ERR(flow);
1594
1595 error = compat_answer_query(dp, flow, 0, ufp);
1596 flow_deferred_free(flow);
1597 return error;
1598 }
1599
1600 static int compat_query_flows(struct datapath *dp,
1601 struct compat_odp_flow __user *flows,
1602 u32 n_flows)
1603 {
1604 struct tbl *table = get_table_protected(dp);
1605 u32 i;
1606
1607 for (i = 0; i < n_flows; i++) {
1608 struct compat_odp_flow __user *ufp = &flows[i];
1609 struct odp_flow uf;
1610 struct tbl_node *flow_node;
1611 int error;
1612
1613 if (compat_get_flow(&uf, ufp))
1614 return -EFAULT;
1615
1616 flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
1617 if (!flow_node)
1618 error = put_user(ENOENT, &ufp->stats.error);
1619 else
1620 error = compat_answer_query(dp, flow_cast(flow_node),
1621 uf.flags, ufp);
1622 if (error)
1623 return -EFAULT;
1624 }
1625 return n_flows;
1626 }
1627
1628 struct compat_list_flows_cbdata {
1629 struct datapath *dp;
1630 struct compat_odp_flow __user *uflows;
1631 u32 n_flows;
1632 u32 listed_flows;
1633 };
1634
1635 static int compat_list_flow(struct tbl_node *node, void *cbdata_)
1636 {
1637 struct sw_flow *flow = flow_cast(node);
1638 struct compat_list_flows_cbdata *cbdata = cbdata_;
1639 struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
1640 int error;
1641
1642 if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
1643 return -EFAULT;
1644 error = compat_answer_query(cbdata->dp, flow, 0, ufp);
1645 if (error)
1646 return error;
1647
1648 if (cbdata->listed_flows >= cbdata->n_flows)
1649 return cbdata->listed_flows;
1650 return 0;
1651 }
1652
1653 static int compat_list_flows(struct datapath *dp,
1654 struct compat_odp_flow __user *flows, u32 n_flows)
1655 {
1656 struct compat_list_flows_cbdata cbdata;
1657 int error;
1658
1659 if (!n_flows)
1660 return 0;
1661
1662 cbdata.dp = dp;
1663 cbdata.uflows = flows;
1664 cbdata.n_flows = n_flows;
1665 cbdata.listed_flows = 0;
1666
1667 error = tbl_foreach(get_table_protected(dp), compat_list_flow, &cbdata);
1668 return error ? error : cbdata.listed_flows;
1669 }
1670
1671 static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
1672 int (*function)(struct datapath *,
1673 struct compat_odp_flow __user *,
1674 u32 n_flows))
1675 {
1676 struct compat_odp_flowvec __user *uflowvec;
1677 struct compat_odp_flow __user *flows;
1678 struct compat_odp_flowvec flowvec;
1679 int retval;
1680
1681 uflowvec = compat_ptr(argp);
1682 if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
1683 copy_from_user(&flowvec, uflowvec, sizeof flowvec))
1684 return -EFAULT;
1685
1686 if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
1687 return -EINVAL;
1688
1689 flows = compat_ptr(flowvec.flows);
1690 if (!access_ok(VERIFY_WRITE, flows,
1691 flowvec.n_flows * sizeof(struct compat_odp_flow)))
1692 return -EFAULT;
1693
1694 retval = function(dp, flows, flowvec.n_flows);
1695 return (retval < 0 ? retval
1696 : retval == flowvec.n_flows ? 0
1697 : put_user(retval, &uflowvec->n_flows));
1698 }
1699
1700 static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
1701 {
1702 struct odp_execute execute;
1703 compat_uptr_t actions;
1704 compat_uptr_t data;
1705
1706 if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
1707 __get_user(actions, &uexecute->actions) ||
1708 __get_user(execute.actions_len, &uexecute->actions_len) ||
1709 __get_user(data, &uexecute->data) ||
1710 __get_user(execute.length, &uexecute->length))
1711 return -EFAULT;
1712
1713 execute.actions = (struct nlattr __force *)compat_ptr(actions);
1714 execute.data = (const void __force *)compat_ptr(data);
1715
1716 return do_execute(dp, &execute);
1717 }
1718
1719 static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
1720 {
1721 int dp_idx = iminor(f->f_dentry->d_inode);
1722 struct datapath *dp;
1723 int err;
1724
1725 switch (cmd) {
1726 case ODP_DP_DESTROY:
1727 case ODP_FLOW_FLUSH:
1728 /* Ioctls that don't need any translation at all. */
1729 return openvswitch_ioctl(f, cmd, argp);
1730
1731 case ODP_DP_CREATE:
1732 case ODP_VPORT_ATTACH:
1733 case ODP_VPORT_DETACH:
1734 case ODP_VPORT_MOD:
1735 case ODP_VPORT_MTU_SET:
1736 case ODP_VPORT_MTU_GET:
1737 case ODP_VPORT_ETHER_SET:
1738 case ODP_VPORT_ETHER_GET:
1739 case ODP_VPORT_STATS_SET:
1740 case ODP_VPORT_STATS_GET:
1741 case ODP_DP_STATS:
1742 case ODP_GET_DROP_FRAGS:
1743 case ODP_SET_DROP_FRAGS:
1744 case ODP_SET_LISTEN_MASK:
1745 case ODP_GET_LISTEN_MASK:
1746 case ODP_SET_SFLOW_PROBABILITY:
1747 case ODP_GET_SFLOW_PROBABILITY:
1748 case ODP_VPORT_QUERY:
1749 /* Ioctls that just need their pointer argument extended. */
1750 return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
1751 }
1752
1753 dp = get_dp_locked(dp_idx);
1754 err = -ENODEV;
1755 if (!dp)
1756 goto exit;
1757
1758 switch (cmd) {
1759 case ODP_VPORT_LIST32:
1760 err = compat_list_ports(dp, compat_ptr(argp));
1761 break;
1762
1763 case ODP_FLOW_PUT32:
1764 err = compat_put_flow(dp, compat_ptr(argp));
1765 break;
1766
1767 case ODP_FLOW_DEL32:
1768 err = compat_del_flow(dp, compat_ptr(argp));
1769 break;
1770
1771 case ODP_FLOW_GET32:
1772 err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
1773 break;
1774
1775 case ODP_FLOW_LIST32:
1776 err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
1777 break;
1778
1779 case ODP_EXECUTE32:
1780 err = compat_execute(dp, compat_ptr(argp));
1781 break;
1782
1783 default:
1784 err = -ENOIOCTLCMD;
1785 break;
1786 }
1787 mutex_unlock(&dp->mutex);
1788 exit:
1789 return err;
1790 }
1791 #endif
1792
1793 /* Unfortunately this function is not exported so this is a verbatim copy
1794 * from net/core/datagram.c in 2.6.30. */
1795 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
1796 u8 __user *to, int len,
1797 __wsum *csump)
1798 {
1799 int start = skb_headlen(skb);
1800 int pos = 0;
1801 int i, copy = start - offset;
1802
1803 /* Copy header. */
1804 if (copy > 0) {
1805 int err = 0;
1806 if (copy > len)
1807 copy = len;
1808 *csump = csum_and_copy_to_user(skb->data + offset, to, copy,
1809 *csump, &err);
1810 if (err)
1811 goto fault;
1812 if ((len -= copy) == 0)
1813 return 0;
1814 offset += copy;
1815 to += copy;
1816 pos = copy;
1817 }
1818
1819 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1820 int end;
1821
1822 WARN_ON(start > offset + len);
1823
1824 end = start + skb_shinfo(skb)->frags[i].size;
1825 if ((copy = end - offset) > 0) {
1826 __wsum csum2;
1827 int err = 0;
1828 u8 *vaddr;
1829 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1830 struct page *page = frag->page;
1831
1832 if (copy > len)
1833 copy = len;
1834 vaddr = kmap(page);
1835 csum2 = csum_and_copy_to_user(vaddr +
1836 frag->page_offset +
1837 offset - start,
1838 to, copy, 0, &err);
1839 kunmap(page);
1840 if (err)
1841 goto fault;
1842 *csump = csum_block_add(*csump, csum2, pos);
1843 if (!(len -= copy))
1844 return 0;
1845 offset += copy;
1846 to += copy;
1847 pos += copy;
1848 }
1849 start = end;
1850 }
1851
1852 if (skb_shinfo(skb)->frag_list) {
1853 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1854
1855 for (; list; list=list->next) {
1856 int end;
1857
1858 WARN_ON(start > offset + len);
1859
1860 end = start + list->len;
1861 if ((copy = end - offset) > 0) {
1862 __wsum csum2 = 0;
1863 if (copy > len)
1864 copy = len;
1865 if (skb_copy_and_csum_datagram(list,
1866 offset - start,
1867 to, copy,
1868 &csum2))
1869 goto fault;
1870 *csump = csum_block_add(*csump, csum2, pos);
1871 if ((len -= copy) == 0)
1872 return 0;
1873 offset += copy;
1874 to += copy;
1875 pos += copy;
1876 }
1877 start = end;
1878 }
1879 }
1880 if (!len)
1881 return 0;
1882
1883 fault:
1884 return -EFAULT;
1885 }
1886
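/* Each successful read() returns one queued packet (truncated to 'nbytes' if
 * necessary), prefixed by the struct odp_msg header added by
 * queue_control_packets(); msg->length covers the header plus packet data.
 * A rough consumer sketch (illustrative only; the buffer size is an
 * arbitrary assumption):
 *
 *	char buf[2048];
 *	ssize_t n = read(fd, buf, sizeof buf);
 *	struct odp_msg *msg = (struct odp_msg *)buf;
 *	(packet data starts at buf + sizeof *msg)
 */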
1887 static ssize_t openvswitch_read(struct file *f, char __user *buf,
1888 size_t nbytes, loff_t *ppos)
1889 {
1890 int listeners = get_listen_mask(f);
1891 int dp_idx = iminor(f->f_dentry->d_inode);
1892 struct datapath *dp = get_dp_locked(dp_idx);
1893 struct sk_buff *skb;
1894 size_t copy_bytes, tot_copy_bytes;
1895 int retval;
1896
1897 if (!dp)
1898 return -ENODEV;
1899
	if (nbytes == 0 || !listeners) {
		mutex_unlock(&dp->mutex);
		return 0;
	}
1902
1903 for (;;) {
1904 int i;
1905
1906 for (i = 0; i < DP_N_QUEUES; i++) {
1907 if (listeners & (1 << i)) {
1908 skb = skb_dequeue(&dp->queues[i]);
1909 if (skb)
1910 goto success;
1911 }
1912 }
1913
1914 if (f->f_flags & O_NONBLOCK) {
1915 retval = -EAGAIN;
1916 goto error;
1917 }
1918
1919 wait_event_interruptible(dp->waitqueue,
1920 dp_has_packet_of_interest(dp,
1921 listeners));
1922
1923 if (signal_pending(current)) {
1924 retval = -ERESTARTSYS;
1925 goto error;
1926 }
1927 }
1928 success:
1929 mutex_unlock(&dp->mutex);
1930
1931 copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);
1932
1933 retval = 0;
1934 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1935 if (copy_bytes == skb->len) {
1936 __wsum csum = 0;
1937 u16 csum_start, csum_offset;
1938
1939 get_skb_csum_pointers(skb, &csum_start, &csum_offset);
1940 csum_start -= skb_headroom(skb);
1941
1942 BUG_ON(csum_start >= skb_headlen(skb));
1943 retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
1944 copy_bytes - csum_start, &csum);
1945 if (!retval) {
1946 __sum16 __user *csump;
1947
1948 copy_bytes = csum_start;
1949 csump = (__sum16 __user *)(buf + csum_start + csum_offset);
1950
1951 BUG_ON((char __user *)csump + sizeof(__sum16) >
1952 buf + nbytes);
1953 put_user(csum_fold(csum), csump);
1954 }
1955 } else
1956 retval = skb_checksum_help(skb);
1957 }
1958
1959 if (!retval) {
1960 struct iovec iov;
1961
1962 iov.iov_base = buf;
1963 iov.iov_len = copy_bytes;
1964 retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
1965 }
1966
1967 if (!retval)
1968 retval = tot_copy_bytes;
1969
1970 kfree_skb(skb);
1971 return retval;
1972
1973 error:
1974 mutex_unlock(&dp->mutex);
1975 return retval;
1976 }
1977
1978 static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
1979 {
1980 int dp_idx = iminor(file->f_dentry->d_inode);
1981 struct datapath *dp = get_dp_locked(dp_idx);
1982 unsigned int mask;
1983
1984 if (dp) {
1985 mask = 0;
1986 poll_wait(file, &dp->waitqueue, wait);
1987 if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
1988 mask |= POLLIN | POLLRDNORM;
1989 mutex_unlock(&dp->mutex);
1990 } else {
1991 mask = POLLIN | POLLRDNORM | POLLHUP;
1992 }
1993 return mask;
1994 }
1995
1996 static struct file_operations openvswitch_fops = {
1997 .owner = THIS_MODULE,
1998 .read = openvswitch_read,
1999 .poll = openvswitch_poll,
2000 .unlocked_ioctl = openvswitch_ioctl,
2001 #ifdef CONFIG_COMPAT
2002 .compat_ioctl = openvswitch_compat_ioctl,
2003 #endif
2004 };
2005
2006 static int major;
2007
2008 static int __init dp_init(void)
2009 {
2010 struct sk_buff *dummy_skb;
2011 int err;
2012
2013 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
2014
2015 printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
2016
2017 err = flow_init();
2018 if (err)
2019 goto error;
2020
2021 err = vport_init();
2022 if (err)
2023 goto error_flow_exit;
2024
2025 err = register_netdevice_notifier(&dp_device_notifier);
2026 if (err)
2027 goto error_vport_exit;
2028
	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}
2032
2033 return 0;
2034
2035 error_unreg_notifier:
2036 unregister_netdevice_notifier(&dp_device_notifier);
2037 error_vport_exit:
2038 vport_exit();
2039 error_flow_exit:
2040 flow_exit();
2041 error:
2042 return err;
2043 }
2044
2045 static void dp_cleanup(void)
2046 {
2047 rcu_barrier();
2048 unregister_chrdev(major, "openvswitch");
2049 unregister_netdevice_notifier(&dp_device_notifier);
2050 vport_exit();
2051 flow_exit();
2052 }
2053
2054 module_init(dp_init);
2055 module_exit(dp_cleanup);
2056
2057 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2058 MODULE_LICENSE("GPL");