/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/llc.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>

#include "openvswitch/datapath-protocol.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * dp_mutex.
 */
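/*
 * Illustrative lock-ordering sketch (not part of the original code): a path
 * that needs both locks, such as the port add/delete ioctls below, takes
 * them in this order:
 *
 *	rtnl_lock();
 *	mutex_lock(&dp_mutex);       (or dp->mutex, via get_dp_locked())
 *	...modify datapath and port state...
 *	mutex_unlock(&dp_mutex);
 *	rtnl_unlock();
 */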
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

static int new_nbp(struct datapath *, struct net_device *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);
struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}
static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ)	/* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN)	/* IFLA_ADDRESS */
	       + nla_total_size(4)		/* IFLA_MASTER */
	       + nla_total_size(4)		/* IFLA_MTU */
	       + nla_total_size(4)		/* IFLA_LINK */
	       + nla_total_size(1);		/* IFLA_OPERSTATE */
}
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	const struct net_device *dev = port->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void dp_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net = dev_net(port->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);
	kfree(dp);
}

struct kobj_type dp_ktype = {
	.release = release_dp
};
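/* Creates datapath number 'dp_idx'.  The local port is named by the
 * user-supplied string at 'devnamep' if that is nonnull, otherwise "of<N>"
 * where <N> is the datapath number. */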
static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct net_device *dp_dev;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		err = -EFAULT;
		if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
			goto err;
		devname[IFNAMSIZ - 1] = '\0';
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);

	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
	err = PTR_ERR(dp_dev);
	if (IS_ERR(dp_dev))
		goto err_destroy_table;

	err = new_nbp(dp, dp_dev, ODPP_LOCAL);
	if (err) {
		dp_dev_destroy(dp_dev);
		goto err_destroy_table;
	}

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_del_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
	dp_table_destroy(dp->table, 0);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}
static void do_destroy_dp(struct datapath *dp)
{
	struct net_bridge_port *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_del_port(p);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	dp_del_port(dp->ports[ODPP_LOCAL]);

	dp_table_destroy(dp->table, 1);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}
static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p = container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

struct kobj_type brport_ktype = {
	.sysfs_ops = &brport_sysfs_ops,
	.release = release_nbp
};
/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
	struct net_bridge_port *p;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	dev_set_promiscuity(dev, 1);
	p->port_no = port_no;
	p->dp = dp;
	p->dev = dev;
	atomic_set(&p->sflow_pool, 0);
	if (!is_dp_dev(dev))
		rcu_assign_pointer(dev->br_port, p);
	else {
		/* It would make sense to assign dev->br_port here too, but
		 * that causes packets received on internal ports to get caught
		 * in dp_frame_hook().  In turn dp_frame_hook() can reject them
		 * back to network stack, but that's a waste of time. */
	}
	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
	p->kobj.kset = NULL;
	kobject_init(&p->kobj, &brport_ktype);

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}
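/* Attaches the device named in '*portp' to datapath 'dp_idx' on the first
 * free port number, creating an internal dp device instead when
 * ODP_PORT_INTERNAL is set, and copies the chosen port number back to
 * userspace. */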
static int add_port(int dp_idx, struct odp_port __user *portp)
{
	struct net_device *dev;
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	if (!(port.flags & ODP_PORT_INTERNAL)) {
		err = -ENODEV;
		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			goto out_unlock_dp;

		err = -EINVAL;
		if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER ||
		    is_dp_dev(dev))
			goto out_put;
	} else {
		dev = dp_dev_create(dp, port.devname, port_no);
		err = PTR_ERR(dev);
		if (IS_ERR(dev))
			goto out_unlock_dp;
	}

	err = new_nbp(dp, dev, port_no);
	if (err)
		goto out_put;

	set_dp_devs_mtu(dp, dev);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = __put_user(port_no, &portp->port);
	goto out_unlock_dp;

out_put:
	dev_put(dev);
out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
int dp_del_port(struct net_bridge_port *p)
{
	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	if (is_dp_dev(p->dev)) {
		/* Make sure that no packets arrive from now on, since
		 * dp_dev_xmit() will try to find itself through
		 * p->dp->ports[], and we're about to set that to null. */
		netif_tx_disable(p->dev);
	}

	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (is_dp_dev(p->dev))
		dp_dev_destroy(p->dev);
	kobject_put(&p->kobj);

	return 0;
}
static int del_port(int dp_idx, int port_no)
{
	struct net_bridge_port *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_del_port(p);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}
/* Must be called with rcu_read_lock. */
static void
do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	dp_process_received_packet(skb, p);
}
/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {
			kfree_skb(skb);
			stats->n_frags++;
			return;
		}
	}

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);
	if (flow) {
		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
				GFP_ATOMIC);
		stats->n_hit++;
	} else {
		stats->n_missed++;
		dp_output_control(dp, skb, _ODPL_MISS_NR, 0);
	}
}
/* Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * different set of devices!) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	do_port_input(p, skb);
	return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Called with rcu_read_lock and bottom-halves disabled. */
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	do_port_input(p, *pskb);
	return 1;
}
#endif
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
/* This code is based on a skb_checksum_setup from net/dev/core.c from a
 * combination of Lenny's 2.6.26 Xen kernel and Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call this function
 * directly because it isn't exported in all versions. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {
		return 1;
	} else {
		return 0;
	}
}
int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	__u16 csum_start, csum_offset;

	if (!skb->proto_csum_blank)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	if (!skb_pull_up_to(skb, skb_network_header(skb) + 1))
		goto out;

	iph = ip_hdr(skb);
	th = skb_network_header(skb) + 4 * iph->ihl;

	csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		printk(KERN_ERR "Attempting to checksum a non-"
		       "TCP/UDP packet, dropping a protocol"
		       " %d packet", iph->protocol);
		goto out;
	}

	if (!skb_pull_up_to(skb, th + csum_offset + 2))
		goto out;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->proto_csum_blank = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	skb->csum_start = csum_start;
	skb->csum_offset = csum_offset;
#else
	skb_set_transport_header(skb, csum_start - skb_headroom(skb));
	skb->csum = csum_offset;
#endif

	err = 0;

out:
	return err;
}
#endif /* CONFIG_XEN && HAVE_PROTO_DATA_VALID */
/* Types of checksums that we can receive (these all refer to L4 checksums):
 * 1. CHECKSUM_NONE: Device that did not compute checksum, contains full
 *    (though not verified) checksum in packet but not in skb->csum.  Packets
 *    from the bridge local port will also have this type.
 * 2. CHECKSUM_COMPLETE (CHECKSUM_HW): Good device that computes checksums,
 *    also the GRE module.  This is the same as CHECKSUM_NONE, except it has
 *    a valid skb->csum.  Importantly, both contain a full checksum (not
 *    verified) in the packet itself.  The only difference is that if the
 *    packet gets to L4 processing on this machine (not in DomU) we won't
 *    have to recompute the checksum to verify.  Most hardware devices do not
 *    produce packets with this type, even if they support receive checksum
 *    offloading (they produce type #5).
 * 3. CHECKSUM_PARTIAL (CHECKSUM_HW): Packet without full checksum and needs to
 *    be computed if it is sent off box.  Unfortunately on earlier kernels,
 *    this case is impossible to distinguish from #2, despite having opposite
 *    meanings.  Xen adds an extra field on earlier kernels (see #4) in order
 *    to distinguish the different states.  The only real user of this type
 *    with bridging is Xen (on later kernels).
 * 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
 *    generated locally by a Xen DomU and has a partial checksum.  If it is
 *    handled on this machine (Dom0 or DomU), then the checksum will not be
 *    computed.  If it goes off box, the checksum in the packet needs to be
 *    completed.  Calling skb_checksum_setup converts this to CHECKSUM_HW
 *    (CHECKSUM_PARTIAL) so that the checksum can be completed.  In later
 *    kernels, this combination is replaced with CHECKSUM_PARTIAL.
 * 5. CHECKSUM_UNNECESSARY (with proto_csum_blank false): Packet with a correct
 *    full checksum or using a protocol without a checksum.  skb->csum is
 *    undefined.  This is common from devices with receive checksum
 *    offloading.  This is somewhat similar to CHECKSUM_NONE, except that
 *    nobody will try to verify the checksum with CHECKSUM_UNNECESSARY.
 *
 * Note that on earlier kernels, CHECKSUM_COMPLETE and CHECKSUM_PARTIAL are
 * both defined as CHECKSUM_HW.  Normally the meaning of CHECKSUM_HW is clear
 * based on whether it is on the transmit or receive path.  After the datapath
 * it will be interpreted as CHECKSUM_PARTIAL.  If the packet already has a
 * checksum, we will panic.  Since we can receive packets with checksums, we
 * assume that all CHECKSUM_HW packets have checksums and map them to
 * CHECKSUM_NONE, which has a similar meaning (it is only different if the
 * packet is processed by the local IP stack, in which case it will need to
 * be reverified).  If we receive a packet with CHECKSUM_HW that really means
 * CHECKSUM_PARTIAL, it will be sent with the wrong checksum.  However, there
 * shouldn't be any devices that do this with bridging.
 *
 * The bridge has similar behavior and this function closely resembles
 * skb_forward_csum().  It is slightly different because we are only concerned
 * with bridging and not other types of forwarding and can get away with
 * slightly more optimal behavior. */
static void
forward_ip_summed(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW)
		skb->ip_summed = CHECKSUM_NONE;
}
/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int
queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
		      int queue_no, u32 arg)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	port_no = ODPP_LOCAL;
	if (skb->dev->br_port)
		port_no = skb->dev->br_port->port_no;
	else if (is_dp_dev(skb->dev))
		port_no = dp_dev_priv(skb->dev)->port_no;

	do {
		struct odp_msg *header;

		nskb = skb->next;
		skb->next = NULL;

		/* If a checksum-deferred packet is forwarded to the
		 * controller, correct the pointers and checksum.  This happens
		 * on a regular basis only on Xen, on which VMs can pass up
		 * packets that do not have their checksum computed.
		 */
		err = vswitch_skb_checksum_setup(skb);
		if (err)
			goto err_kfree_skbs;
#ifndef CHECKSUM_HW
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
			/* Until 2.6.22, the start of the transport header was
			 * also the start of data to be checksummed.  Linux
			 * 2.6.22 introduced the csum_start field for this
			 * purpose, but we should point the transport header to
			 * it anyway for backward compatibility, as
			 * dev_queue_xmit() does even in 2.6.28. */
			skb_set_transport_header(skb, skb->csum_start -
						 skb_headroom(skb));
#endif
			err = skb_checksum_help(skb);
			if (err)
				goto err_kfree_skbs;
		}
#else
		if (skb->ip_summed == CHECKSUM_HW) {
			err = skb_checksum_help(skb, 0);
			if (err)
				goto err_kfree_skbs;
		}
#endif

		err = skb_cow(skb, sizeof *header);
		if (err)
			goto err_kfree_skbs;

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}
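/* Queues 'skb' on the 'queue_no' queue of 'dp' for delivery to userspace,
 * segmenting GSO packets first and waking up any reader blocked in
 * openvswitch_read().  On failure the packet is dropped and counted in
 * n_lost. */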
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		  u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);
		kfree_skb(skb);
		skb = nskb;
		if (unlikely(IS_ERR(skb))) {
			err = PTR_ERR(skb);
			goto err;
		}
		/* XXX This case might not be possible.  It's hard to
		 * tell from the skb_gso_segment() code and comment. */
	}

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	stats = percpu_ptr(dp->stats_percpu, get_cpu());
	stats->n_lost++;
	put_cpu();

	return err;
}
static int flush_flows(struct datapath *dp)
{
	dp->n_flows = 0;
	return dp_table_flush(dp);
}
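/* Verifies that every action in 'actions' refers to in-range ports, groups,
 * and VLAN fields; returns 0 if they are all acceptable, otherwise a negative
 * errno value. */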
static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];
		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp
			    & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
				return -EINVAL;
			break;

		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EOPNOTSUPP;
			break;
		}
	}

	return 0;
}
static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}
*flow
, struct odp_flow_stats
*stats
)
909 if (flow
->used
.tv_sec
) {
910 stats
->used_sec
= flow
->used
.tv_sec
;
911 stats
->used_nsec
= flow
->used
.tv_nsec
;
914 stats
->used_nsec
= 0;
916 stats
->n_packets
= flow
->packet_count
;
917 stats
->n_bytes
= flow
->byte_count
;
918 stats
->ip_tos
= flow
->ip_tos
;
919 stats
->tcp_flags
= flow
->tcp_flags
;
static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;
	flow->tcp_flags = 0;
	flow->ip_tos = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
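/* Creates a new flow or modifies an existing one, according to the ODPPF_*
 * flags in the odp_flow_put at 'ufp', and copies the flow's previous
 * statistics back to userspace. */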
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_put uf;
	struct sw_flow *flow;
	struct dp_table *table;
	struct odp_flow_stats stats;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		goto error;
	uf.flow.key.reserved = 0;

	table = rcu_dereference(dp->table);
	flow = dp_table_lookup(table, &uf.flow.key);
	if (!flow) {
		/* No such flow. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf.flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (dp->n_flows >= table->n_buckets) {
			error = -ENOSPC;
			if (table->n_buckets >= DP_MAX_BUCKETS)
				goto error;

			error = dp_table_expand(dp);
			if (error)
				goto error;
			table = rcu_dereference(dp->table);
		}

		/* Allocate flow. */
		error = -ENOMEM;
		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
		if (flow == NULL)
			goto error;
		flow->key = uf.flow.key;
		spin_lock_init(&flow->lock);
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf.flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = dp_table_insert(table, flow);
		if (error)
			goto error_free_flow_acts;
		dp->n_flows++;
		memset(&stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;
		unsigned long int flags;

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf.flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf.flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;
		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_irqsave(&flow->lock, flags);
		get_stats(flow, &stats);
		if (uf.flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_irqrestore(&flow->lock, flags);
	}

	/* Copy stats to userspace. */
	if (__copy_to_user(&ufp->flow.stats, &stats,
			   sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return 0;

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	kmem_cache_free(flow_cache, flow);
error:
	return error;
}
static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
{
	union odp_action __user *actions;
	struct sw_flow_actions *sf_acts;
	u32 n_actions;

	if (__get_user(actions, &ufp->actions) ||
	    __get_user(n_actions, &ufp->n_actions))
		return -EFAULT;

	if (!n_actions)
		return 0;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}
static int answer_query(struct sw_flow *flow, u32 query_flags,
			struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);

	if (query_flags & ODPFF_ZERO_TCP_FLAGS) {
		flow->tcp_flags = 0;
	}
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return put_actions(flow, ufp);
}
static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof uf))
		goto error;
	uf.key.reserved = 0;

	flow = dp_table_lookup(table, &uf.key);
	error = -ENOENT;
	if (!flow)
		goto error;

	/* XXX redundant lookup */
	error = dp_table_delete(table, flow);
	if (error)
		goto error;

	/* XXX These statistics might lose a few packets, since other CPUs can
	 * be using this flow.  We used to synchronize_rcu() to make sure that
	 * we get completely accurate stats, but that blows our performance,
	 * badly. */
	dp->n_flows--;
	error = answer_query(flow, 0, ufp);
	flow_deferred_free(flow);

error:
	return error;
}
static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	u32 i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		uf.key.reserved = 0;

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(flow, uf.flags, ufp);
		if (error)
			return -EFAULT;
	}
	return flowvec->n_flows;
}
struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	int n_flows;
	int listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}
static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}
static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}
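/* Injects a packet supplied by userspace into the datapath: copies in the
 * packet data and actions, validates them, and runs execute_actions() on the
 * result as if the packet had been received on 'execute.in_port'. */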
static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	int err;

	err = -EFAULT;
	if (copy_from_user(&execute, executep, sizeof execute))
		goto error;

	err = -EINVAL;
	if (execute.length < ETH_HLEN || execute.length > 65535)
		goto error;

	err = -ENOMEM;
	actions = flow_actions_alloc(execute.n_actions);
	if (!actions)
		goto error;

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;
	if (execute.in_port < DP_MAX_PORTS) {
		struct net_bridge_port *p = dp->ports[execute.in_port];
		if (p)
			skb->dev = p->dev;
	}

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
	stats.max_capacity = DP_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct net_bridge_port *p;
	int mtu = 0;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *dev = p->dev;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_dp_dev(dev))
			continue;

		if (!mtu || dev->mtu < mtu)
			mtu = dev->mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}
/* Sets the MTU of all datapath devices to the minimum of the ports.  'dev'
 * is the device whose MTU may have changed.  Must be called with RTNL lock
 * held. */
void set_dp_devs_mtu(const struct datapath *dp, struct net_device *dev)
{
	struct net_bridge_port *p;
	int mtu;

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		struct net_device *br_dev = p->dev;

		if (is_dp_dev(br_dev))
			dev_set_mtu(br_dev, mtu);
	}
}
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);
	strncpy(op.devname, p->dev->name, sizeof op.devname);
	op.port = p->port_no;
	op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0;
	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}
static int
query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;
	if (port.devname[0]) {
		struct net_bridge_port *p;
		struct net_device *dev;
		int err;

		port.devname[IFNAMSIZ - 1] = '\0';

		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			return -ENODEV;

		p = dev->br_port;
		if (!p && is_dp_dev(dev)) {
			struct dp_dev *dp_dev = dp_dev_priv(dev);
			if (dp_dev->dp == dp)
				p = dp->ports[dp_dev->port_no];
		}
		err = p && p->dp == dp ? put_port(p, uport) : -ENOENT;
		dev_put(dev);

		return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
		return put_port(dp->ports[port.port], uport);
	}
}
static int
list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct net_bridge_port *p;
	int idx;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	idx = 0;
	if (pv.n_ports) {
		list_for_each_entry_rcu (p, &dp->port_list, node) {
			if (put_port(p, &pv.ports[idx]))
				return -EFAULT;
			if (idx++ >= pv.n_ports)
				break;
		}
	}
	return put_user(dp->n_ports, &pvp->n_ports);
}
/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}
static int
set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;
	int error;

	error = -EFAULT;
	if (copy_from_user(&pg, upg, sizeof pg))
		goto error;

	error = -EINVAL;
	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		goto error;

	error = -ENOMEM;
	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);
	if (!new_group)
		goto error;

	new_group->n_ports = pg.n_ports;
	error = -EFAULT;
	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))
		goto error_free;

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);
	if (old_group)
		call_rcu(&old_group->rcu, free_port_group);
	return 0;

error_free:
	kfree(new_group);
error:
	return error;
}
static int
get_port_group(struct datapath *dp, struct odp_port_group *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	u16 n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}
static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void*)(long)listen_mask;
}
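/*
 * Usage sketch (illustrative only, not kernel code): a userspace process
 * that already holds a file descriptor for this datapath's character device
 * might subscribe to flow-miss packets roughly like this:
 *
 *	int mask = 1 << _ODPL_MISS_NR;    (presumably named ODPL_MISS in
 *	                                   datapath-protocol.h)
 *	ioctl(fd, ODP_SET_LISTEN_MASK, &mask);
 *	n = read(fd, buf, sizeof buf);
 *
 * Each successful read() then returns one queued packet, prefixed by the
 * struct odp_msg header filled in by queue_control_packets() above.
 */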
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_PORT_ADD:
		err = add_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_PORT_DEL:
		err = get_user(port_no, (int __user *)argp);
		if (!err)
			err = del_port(dp_idx, port_no);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_PORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, query_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, list_flows);
		break;

	case ODP_EXECUTE:
		err = do_execute(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}
ssize_t
openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
		 loff_t *ppos)
{
	/* XXX is there sufficient synchronization here? */
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	struct sk_buff *skb;
	struct iovec __user iov;
	size_t copy_bytes;
	int retval;
	int i;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners)
		return 0;

	for (;;) {
		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	copy_bytes = min_t(size_t, skb->len, nbytes);
	iov.iov_base = buf;
	iov.iov_len = copy_bytes;
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = copy_bytes;
	kfree_skb(skb);

error:
	return retval;
}
static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	/* XXX is there sufficient synchronization here? */
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}
struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read		= openvswitch_read,
	.poll		= openvswitch_poll,
	.unlocked_ioctl	= openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};
static int major;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
static struct llc_sap *dp_stp_sap;

static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* We don't really care about STP packets, we just listen for them for
	 * mutual exclusion with the bridge module, so this just discards
	 * them. */
	kfree_skb(skb);
	return 0;
}

static int dp_avoid_bridge_init(void)
{
	/* Register to receive STP packets because the bridge module also
	 * attempts to do so.  Since there can only be a single listener for a
	 * given protocol, this provides mutual exclusion against the bridge
	 * module, preventing both of them from being loaded at the same
	 * time. */
	dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv);
	if (!dp_stp_sap) {
		printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
		return -EADDRINUSE;
	}
	return 0;
}

static void dp_avoid_bridge_exit(void)
{
	llc_sap_put(dp_stp_sap);
}
1750 static int dp_avoid_bridge_init(void)
1752 /* Linux 2.6.27 introduces a way for multiple clients to register for
1753 * STP packets, which interferes with what we try to do above.
1754 * Instead, just check whether there's a bridge hook defined. This is
1755 * not as safe--the bridge module is willing to load over the top of
1756 * us--but it provides a little bit of protection. */
1757 if (br_handle_frame_hook
) {
1758 printk(KERN_ERR
"openvswitch: bridge module is loaded, cannot load over it\n");
1764 static void dp_avoid_bridge_exit(void)
1766 /* Nothing to do. */
1768 #endif /* Linux 2.6.27 or later */
static int __init dp_init(void)
{
	int err;

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = dp_avoid_bridge_init();
	if (err)
		return err;

	err = flow_init();
	if (err)
		goto error;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_flow_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_flow_exit:
	flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	flow_exit();
	br_handle_frame_hook = NULL;
	dp_avoid_bridge_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);
MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");