/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/llc.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>

#include "openvswitch/datapath-protocol.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * dp_mutex. */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
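/* Example of the locking discipline described above (an illustrative sketch,
 * not code from this file): a writer that needs both locks must take the
 * RTNL lock first, then dp_mutex:
 *
 *	rtnl_lock();
 *	mutex_lock(&dp_mutex);
 *	... modify dps[] or per-port structures ...
 *	mutex_unlock(&dp_mutex);
 *	rtnl_unlock();
 */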
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000
static int new_nbp(struct datapath *, struct net_device *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);
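/* Example caller of get_dp() (an illustrative sketch, not code from this
 * file): the returned pointer is protected by RCU, so it may only be used
 * inside the read-side critical section:
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp)
 *		... read-only use of 'dp' ...
 *	rcu_read_unlock();
 */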
static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}
static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	const struct net_device *dev = port->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void dp_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net = dev_net(port->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct net_device *dp_dev;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		err = -EFAULT;
		if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
			goto err;
		devname[IFNAMSIZ - 1] = '\0';
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
	err = PTR_ERR(dp_dev);
	if (IS_ERR(dp_dev))
		goto err_destroy_table;

	err = new_nbp(dp, dp_dev, ODPP_LOCAL);
	if (err) {
		dp_dev_destroy(dp_dev);
		goto err_destroy_table;
	}

	err = -ENOMEM;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_del_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
	dp_table_destroy(dp->table, 0);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}
static void do_destroy_dp(struct datapath *dp)
{
	struct net_bridge_port *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_del_port(p);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_del_port(dp->ports[ODPP_LOCAL]);

	dp_table_destroy(dp->table, 1);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}
static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err = -ENODEV;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp) {
		do_destroy_dp(dp);
		err = 0;
	}
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p = container_of(kobj, struct net_bridge_port,
						 kobj);
	kfree(p);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp
};
/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
	struct net_bridge_port *p;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	dev_set_promiscuity(dev, 1);
	dev_hold(dev);
	p->port_no = port_no;
	p->dp = dp;
	p->dev = dev;
	atomic_set(&p->sflow_pool, 0);
	if (!is_dp_dev(dev))
		rcu_assign_pointer(dev->br_port, p);
	else {
		/* It would make sense to assign dev->br_port here too, but
		 * that causes packets received on internal ports to get caught
		 * in dp_frame_hook().  In turn dp_frame_hook() can reject them
		 * back to network stack, but that's a waste of time. */
	}
	dev_disable_lro(dev);
	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	p->kobj.kset = NULL;
	kobject_init(&p->kobj, &brport_ktype);

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}
static int add_port(int dp_idx, struct odp_port __user *portp)
{
	struct net_device *dev;
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	if (!(port.flags & ODP_PORT_INTERNAL)) {
		err = -ENODEV;
		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			goto out_unlock_dp;

		err = -EINVAL;
		if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER ||
		    is_dp_dev(dev))
			goto out_put;
	} else {
		dev = dp_dev_create(dp, port.devname, port_no);
		err = PTR_ERR(dev);
		if (IS_ERR(dev))
			goto out_unlock_dp;
	}

	err = new_nbp(dp, dev, port_no);
	if (err)
		goto out_put;

	set_dp_devs_mtu(dp, dev);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = __put_user(port_no, &portp->port);

out_put:
	dev_put(dev);
out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
int dp_del_port(struct net_bridge_port *p)
{
	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	if (is_dp_dev(p->dev)) {
		/* Make sure that no packets arrive from now on, since
		 * dp_dev_xmit() will try to find itself through
		 * p->dp->ports[], and we're about to set that to null. */
		netif_tx_disable(p->dev);
	}

	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (is_dp_dev(p->dev))
		dp_dev_destroy(p->dev);
	dev_put(p->dev);
	kobject_put(&p->kobj);

	return 0;
}
static int del_port(int dp_idx, int port_no)
{
	struct net_bridge_port *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_del_port(p);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
/* Must be called with rcu_read_lock. */
static void
do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* LRO isn't suitable for bridging.  We turn it off but make sure
	 * that it wasn't reactivated. */
	if (skb_warn_if_lro(skb))
		return;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	dp_process_received_packet(skb, p);
}
/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));

	compute_ip_summed(skb, false);

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {
			kfree_skb(skb);
			stats->n_frags++;
			return;
		}
	}

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);
	if (flow) {
		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
				GFP_ATOMIC);
		stats->n_hit++;
	} else {
		stats->n_missed++;
		dp_output_control(dp, skb, _ODPL_MISS_NR, 0);
	}
}
/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * a different set of devices!)
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	do_port_input(p, skb);
	return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Called with rcu_read_lock and bottom-halves disabled. */
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	do_port_input(p, *pskb);
	return 1;
}
#endif
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* This code is based on skb_checksum_setup() from net/dev/core.c from a
 * combination of Lenny's 2.6.26 Xen kernel and Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call this function
 * directly because it isn't exported in all versions. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {
		return 1;
	} else {
		return 0;
	}
}

int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	__u16 csum_start, csum_offset;

	if (!skb->proto_csum_blank)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
		goto out;

	iph = ip_hdr(skb);
	th = skb_network_header(skb) + 4 * iph->ihl;

	csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		printk(KERN_ERR "Attempting to checksum a non-"
		       "TCP/UDP packet, dropping a protocol"
		       " %d packet", iph->protocol);
		goto out;
	}

	if (!skb_pull_up_to(skb, th + csum_offset + 2))
		goto out;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->proto_csum_blank = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->csum_start = csum_start;
	skb->csum_offset = csum_offset;
#else
	skb_set_transport_header(skb, csum_start - skb_headroom(skb));
	skb->csum = csum_offset;
#endif

	err = 0;

out:
	return err;
}
#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
/* Types of checksums that we can receive (these all refer to L4 checksums):
 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
 *    (though not verified) checksum in packet but not in skb->csum.  Packets
 *    from the bridge local port will also have this type.
 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
 *    also the GRE module.  This is the same as CHECKSUM_NONE, except it has
 *    a valid skb->csum.  Importantly, both contain a full checksum (not
 *    verified) in the packet itself.  The only difference is that if the
 *    packet gets to L4 processing on this machine (not in DomU) we won't
 *    have to recompute the checksum to verify.  Most hardware devices do not
 *    produce packets with this type, even if they support receive checksum
 *    offloading (they produce type #5).
 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without a full checksum; one
 *    needs to be computed if the packet is sent off box.  Unfortunately on
 *    earlier kernels, this case is impossible to distinguish from #2,
 *    despite having opposite meanings.  Xen adds an extra field on earlier
 *    kernels (see #4) in order to distinguish the different states.  The
 *    only real user of this type with bridging is Xen (on later kernels).
 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
 *    generated locally by a Xen DomU and has a partial checksum.  If it is
 *    handled on this machine (Dom0 or DomU), then the checksum will not be
 *    computed.  If it goes off box, the checksum in the packet needs to be
 *    completed.  Calling skb_checksum_setup converts this to CHECKSUM_HW
 *    (CHECKSUM_PARTIAL) so that the checksum can be completed.  In later
 *    kernels, this combination is replaced with CHECKSUM_PARTIAL.
 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a
 *    correct full checksum or using a protocol without a checksum.
 *    skb->csum is undefined.  This is common from devices with receive
 *    checksum offloading.  This is somewhat similar to CHECKSUM_NONE, except
 *    that nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
 *
 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
 * both defined as CHECKSUM_HW.  Normally the meaning of CHECKSUM_HW is clear
 * based on whether it is on the transmit or receive path.  After the datapath
 * it will be interpreted as CHECKSUM_PARTIAL.  If the packet already has a
 * checksum, we will panic.  Since we can receive packets with checksums, we
 * assume that all CHECKSUM_HW packets have checksums and map them to
 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
 * packet is processed by the local IP stack, in which case it will need to
 * be reverified).  If we receive a packet with CHECKSUM_HW that really means
 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum.  However, there
 * shouldn't be any devices that do this with bridging.
 *
 * The bridge has similar behavior and this function closely resembles
 * skb_forward_csum().  It is slightly different because we are only concerned
 * with bridging and not other types of forwarding and can get away with
 * slightly more optimal behavior. */
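/* A compact summary of the mapping that compute_ip_summed() implements below
 * (a sketch of the cases described above; CHECKSUM_HW stands in for both
 * COMPLETE and PARTIAL on older kernels, and the Xen proto_csum_blank flag
 * overrides the result to OVS_CSUM_PARTIAL):
 *
 *	skb->ip_summed on entry		OVS_CB(skb)->ip_summed
 *	CHECKSUM_NONE			OVS_CSUM_NONE
 *	CHECKSUM_UNNECESSARY		OVS_CSUM_UNNECESSARY
 *	CHECKSUM_COMPLETE		OVS_CSUM_COMPLETE
 *	CHECKSUM_PARTIAL		OVS_CSUM_PARTIAL
 *	CHECKSUM_HW, receive path	OVS_CSUM_COMPLETE
 *	CHECKSUM_HW, transmit path	OVS_CSUM_PARTIAL
 */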
void
compute_ip_summed(struct sk_buff *skb, bool xmit)
{
	/* For our convenience these defines change repeatedly between kernel
	 * versions, so we can't just copy them over... */
	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
		break;
	case CHECKSUM_UNNECESSARY:
		OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
		break;
#ifdef CHECKSUM_HW
	/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
	 * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
	 * uses some special fields to represent this (see below).  Since we
	 * can only make one type work, pick the one that actually happens in
	 * practice.
	 *
	 * The one exception to this is if we are on the transmit path
	 * (basically after skb_checksum_setup() has been run) the type has
	 * already been converted, so we should stay with that. */
	case CHECKSUM_HW:
		if (!xmit)
			OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		else
			OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;
#else
	case CHECKSUM_COMPLETE:
		OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
		break;
	case CHECKSUM_PARTIAL:
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
		break;
#endif
	default:
		printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
		       skb->ip_summed);
		/* None seems the safest... */
		OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
		break;
	}

#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
	/* Xen has a special way of representing CHECKSUM_PARTIAL on older
	 * kernels.  It should not be set on the transmit path though. */
	if (skb->proto_csum_blank)
		OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;

	WARN_ON_ONCE(skb->proto_csum_blank && xmit);
#endif
}
void
forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
	if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
#endif
}
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int
queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
		      int queue_no, u32 arg)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	port_no = ODPP_LOCAL;
	if (skb->dev) {
		if (skb->dev->br_port)
			port_no = skb->dev->br_port->port_no;
		else if (is_dp_dev(skb->dev))
			port_no = dp_dev_priv(skb->dev)->port_no;
	}

	do {
		struct odp_msg *header;

		nskb = skb->next;
		skb->next = NULL;

		/* If a checksum-deferred packet is forwarded to the
		 * controller, correct the pointers and checksum.  This happens
		 * on a regular basis only on Xen, on which VMs can pass up
		 * packets that do not have their checksum computed. */
		err = vswitch_skb_checksum_setup(skb);
		if (err)
			goto err_kfree_skbs;
#ifndef CHECKSUM_HW
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
			/* Until 2.6.22, the start of the transport header was
			 * also the start of data to be checksummed.  Linux
			 * 2.6.22 introduced the csum_start field for this
			 * purpose, but we should point the transport header to
			 * it anyway for backward compatibility, as
			 * dev_queue_xmit() does even in 2.6.28. */
			skb_set_transport_header(skb, skb->csum_start -
						 skb_headroom(skb));
#endif
			err = skb_checksum_help(skb);
			if (err)
				goto err_kfree_skbs;
		}
#else
		if (skb->ip_summed == CHECKSUM_HW) {
			err = skb_checksum_help(skb, 0);
			if (err)
				goto err_kfree_skbs;
		}
#endif

		err = skb_cow(skb, sizeof *header);
		if (err)
			goto err_kfree_skbs;

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}
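/* For reference, each buffer queued above begins with the struct odp_msg
 * that queue_control_packets() fills in, followed by the packet itself;
 * header->length covers both.  A reader can therefore walk a buffer as in
 * this sketch:
 *
 *	struct odp_msg *msg = buf;
 *	void *packet = msg + 1;
 *	size_t packet_len = msg->length - sizeof *msg;
 */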
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		  u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);

		kfree_skb(skb);
		skb = nskb;
		if (unlikely(IS_ERR(skb))) {
			/* XXX This case might not be possible.  It's hard to
			 * tell from the skb_gso_segment() code and comment. */
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	stats = percpu_ptr(dp->stats_percpu, get_cpu());
	stats->n_lost++;
	put_cpu();

	return err;
}
static int flush_flows(struct datapath *dp)
{
	dp->n_flows = 0;
	return dp_table_flush(dp);
}
static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];
		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp
			    & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
				return -EINVAL;
			break;

		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EOPNOTSUPP;
			break;
		}
	}

	return 0;
}
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}
static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used.tv_sec) {
		stats->used_sec = flow->used.tv_sec;
		stats->used_nsec = flow->used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}
	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->ip_tos = flow->ip_tos;
	stats->tcp_flags = flow->tcp_flags;
}
static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;
	flow->tcp_flags = 0;
	flow->ip_tos = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_put uf;
	struct sw_flow *flow;
	struct dp_table *table;
	struct odp_flow_stats stats;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		goto error;
	memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved);

	table = rcu_dereference(dp->table);
	flow = dp_table_lookup(table, &uf.flow.key);
	if (!flow) {
		/* No such flow. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf.flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (dp->n_flows >= table->n_buckets) {
			error = -ENOSPC;
			if (table->n_buckets >= DP_MAX_BUCKETS)
				goto error;

			error = dp_table_expand(dp);
			if (error)
				goto error;

			table = rcu_dereference(dp->table);
		}

		/* Allocate flow. */
		error = -ENOMEM;
		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
		if (flow == NULL)
			goto error;
		flow->key = uf.flow.key;
		spin_lock_init(&flow->lock);
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf.flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = dp_table_insert(table, flow);
		if (error)
			goto error_free_flow_acts;

		dp->n_flows++;
		memset(&stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;
		unsigned long int flags;

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf.flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf.flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;
		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_irqsave(&flow->lock, flags);
		get_stats(flow, &stats);
		if (uf.flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_irqrestore(&flow->lock, flags);
	}

	/* Copy stats to userspace. */
	if (__copy_to_user(&ufp->flow.stats, &stats,
			   sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return 0;

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	kmem_cache_free(flow_cache, flow);
error:
	return error;
}
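/* Illustrative userspace sketch of driving put_flow() through the ioctl
 * interface (assumes 'dp_fd' is an open datapath file descriptor and that
 * 'key', 'actions', and 'n_actions' are already filled in; not code from
 * this file):
 *
 *	struct odp_flow_put fp;
 *
 *	memset(&fp, 0, sizeof fp);
 *	fp.flow.key = key;
 *	fp.flow.actions = actions;
 *	fp.flow.n_actions = n_actions;
 *	fp.flags = ODPPF_CREATE | ODPPF_MODIFY;
 *	ioctl(dp_fd, ODP_FLOW_PUT, &fp);
 */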
static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
{
	union odp_action __user *actions;
	struct sw_flow_actions *sf_acts;
	u32 n_actions;

	if (__get_user(actions, &ufp->actions) ||
	    __get_user(n_actions, &ufp->n_actions))
		return -EFAULT;

	if (!n_actions)
		return 0;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}
static int answer_query(struct sw_flow *flow, u32 query_flags,
			struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);

	if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
		flow->tcp_flags = 0;
	}
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return put_actions(flow, ufp);
}
static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof uf))
		goto error;
	memset(uf.key.reserved, 0, sizeof uf.key.reserved);

	flow = dp_table_lookup(table, &uf.key);
	error = -ENOENT;
	if (!flow)
		goto error;

	/* XXX redundant lookup */
	error = dp_table_delete(table, flow);
	if (error)
		goto error;

	/* XXX These statistics might lose a few packets, since other CPUs can
	 * be using this flow.  We used to synchronize_rcu() to make sure that
	 * we get completely accurate stats, but that blows our performance,
	 * badly. */
	dp->n_flows--;
	error = answer_query(flow, 0, ufp);
	flow_deferred_free(flow);

error:
	return error;
}
static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	int i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct __user odp_flow *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		memset(uf.key.reserved, 0, sizeof uf.key.reserved);

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(flow, uf.flags, ufp);
		if (error)
			return -EFAULT;
	}
	return flowvec->n_flows;
}
struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	int n_flows;
	int listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}
static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}
static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	int err;

	err = -EFAULT;
	if (copy_from_user(&execute, executep, sizeof execute))
		goto error;

	err = -EINVAL;
	if (execute.length < ETH_HLEN || execute.length > 65535)
		goto error;

	err = -ENOMEM;
	actions = flow_actions_alloc(execute.n_actions);
	if (!actions)
		goto error;

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;
	if (execute.in_port < DP_MAX_PORTS) {
		struct net_bridge_port *p = dp->ports[execute.in_port];
		if (p)
			skb->dev = p->dev;
	}

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
	stats.max_capacity = DP_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct net_bridge_port *p;
	int mtu = 0;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *dev = p->dev;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_dp_dev(dev))
			continue;

		if (!mtu || dev->mtu < mtu)
			mtu = dev->mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}
/* Sets the MTU of all datapath devices to the minimum of the ports.  'dev'
 * is the device whose MTU may have changed.  Must be called with RTNL lock
 * and dp_mutex. */
void set_dp_devs_mtu(const struct datapath *dp, struct net_device *dev)
{
	struct net_bridge_port *p;
	int mtu;

	if (is_dp_dev(dev))
		return;

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *br_dev = p->dev;

		if (is_dp_dev(br_dev))
			dev_set_mtu(br_dev, mtu);
	}
}
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);
	strncpy(op.devname, p->dev->name, sizeof op.devname);
	op.port = p->port_no;
	op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0;
	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
static int
query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;
	if (port.devname[0]) {
		struct net_bridge_port *p;
		struct net_device *dev;
		int err;

		port.devname[IFNAMSIZ - 1] = '\0';

		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			return -ENODEV;

		p = dev->br_port;
		if (!p && is_dp_dev(dev)) {
			struct dp_dev *dp_dev = dp_dev_priv(dev);
			if (dp_dev->dp == dp)
				p = dp->ports[dp_dev->port_no];
		}
		err = p && p->dp == dp ? put_port(p, uport) : -ENOENT;
		dev_put(dev);

		return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
		return put_port(dp->ports[port.port], uport);
	}
}
static int
list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct net_bridge_port *p;
	int idx;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	idx = 0;
	if (pv.n_ports) {
		list_for_each_entry_rcu (p, &dp->port_list, node) {
			if (put_port(p, &pv.ports[idx]))
				return -EFAULT;
			if (idx++ >= pv.n_ports)
				break;
		}
	}
	return put_user(dp->n_ports, &pvp->n_ports);
}
/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}
static int
set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;
	int error;

	error = -EFAULT;
	if (copy_from_user(&pg, upg, sizeof pg))
		goto error;

	error = -EINVAL;
	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		goto error;

	error = -ENOMEM;
	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);
	if (!new_group)
		goto error;

	new_group->n_ports = pg.n_ports;
	error = -EFAULT;
	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))
		goto error_free;

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);
	if (old_group)
		call_rcu(&old_group->rcu, free_port_group);
	return 0;

error_free:
	kfree(new_group);
error:
	return error;
}
static int
get_port_group(struct datapath *dp, struct odp_port_group *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	u16 n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}
static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void *)(long)listen_mask;
}
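/* The listen mask stored above is a bitmap over the DP_N_QUEUES per-datapath
 * queues: bit (1 << queue_no) enables delivery from dp->queues[queue_no] to
 * this file descriptor.  For example (an illustrative sketch), a reader
 * interested only in flow-table misses and action-directed packets would do:
 *
 *	set_listen_mask(f, (1 << _ODPL_MISS_NR) | (1 << _ODPL_ACTION_NR));
 */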
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_PORT_ADD:
		err = add_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_PORT_DEL:
		err = get_user(port_no, (int __user *)argp);
		if (!err)
			err = del_port(dp_idx, port_no);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_PORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, query_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, list_flows);
		break;

	case ODP_EXECUTE:
		err = do_execute(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
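/* Illustrative userspace sketch of the ioctl interface above (assumptions: a
 * character device node such as /dev/net/dp0 created for this driver's major
 * number, with the minor number selecting dp_idx; error handling omitted):
 *
 *	int fd = open("/dev/net/dp0", O_RDWR);
 *	struct odp_stats stats;
 *
 *	ioctl(fd, ODP_DP_CREATE, "br0");	(creates the datapath and its
 *						 local port)
 *	ioctl(fd, ODP_DP_STATS, &stats);	(counters filled in by
 *						 get_dp_stats())
 *	close(fd);
 */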
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
			 loff_t *ppos)
{
	/* XXX is there sufficient synchronization here? */
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	struct sk_buff *skb;
	struct iovec __user iov;
	size_t copy_bytes;
	int retval;
	int i;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners)
		return 0;

	for (;;) {
		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	copy_bytes = min_t(size_t, skb->len, nbytes);
	iov.iov_base = buf;
	iov.iov_len = copy_bytes;
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = copy_bytes;
	kfree_skb(skb);

error:
	return retval;
}
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	/* XXX is there sufficient synchronization here? */
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}
struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read		= openvswitch_read,
	.poll		= openvswitch_poll,
	.unlocked_ioctl	= openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
static struct llc_sap *dp_stp_sap;

static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* We don't really care about STP packets, we just listen for them for
	 * mutual exclusion with the bridge module, so this just discards
	 * them. */
	kfree_skb(skb);
	return 0;
}

static int dp_avoid_bridge_init(void)
{
	/* Register to receive STP packets because the bridge module also
	 * attempts to do so.  Since there can only be a single listener for a
	 * given protocol, this provides mutual exclusion against the bridge
	 * module, preventing both of them from being loaded at the same
	 * time. */
	dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv);
	if (!dp_stp_sap) {
		printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
		return -EADDRINUSE;
	}
	return 0;
}

static void dp_avoid_bridge_exit(void)
{
	llc_sap_put(dp_stp_sap);
}
#else  /* Linux 2.6.27 or later. */
static int dp_avoid_bridge_init(void)
{
	/* Linux 2.6.27 introduces a way for multiple clients to register for
	 * STP packets, which interferes with what we try to do above.
	 * Instead, just check whether there's a bridge hook defined.  This is
	 * not as safe--the bridge module is willing to load over the top of
	 * us--but it provides a little bit of protection. */
	if (br_handle_frame_hook) {
		printk(KERN_ERR "openvswitch: bridge module is loaded, cannot load over it\n");
		return -EEXIST;
	}
	return 0;
}

static void dp_avoid_bridge_exit(void)
{
	/* Nothing to do. */
}
#endif	/* Linux 2.6.27 or later */
static int __init dp_init(void)
{
	int err;

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = dp_avoid_bridge_init();
	if (err)
		return err;

	err = flow_init();
	if (err)
		goto error;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_flow_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_flow_exit:
	flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	flow_exit();
	br_handle_frame_hook = NULL;
	dp_avoid_bridge_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");