/*
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "checksum.h"
#include "datapath.h"
#include "actions.h"
#include "flow.h"
#include "loop_counter.h"
#include "odp-compat.h"
#include "table.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
 * lock first.
 *
 * It is safe to access the datapath and vport structures with just
 * dp_mutex. */
static struct datapath __rcu *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);
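
/* A minimal sketch of the lock nesting described above (illustrative only;
 * create_dp() below follows exactly this order when it needs both locks):
 *
 *	rtnl_lock();
 *	mutex_lock(&dp_mutex);
 *	...
 *	mutex_unlock(&dp_mutex);
 *	rtnl_unlock();
 */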

static int new_vport(struct datapath *, struct odp_port *, int port_no);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
				     lockdep_is_held(&dp_mutex));
}
EXPORT_SYMBOL_GPL(get_dp);

static struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}
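
/* Typical caller pattern for get_dp_locked() (sketch; openvswitch_ioctl()
 * below is the real thing): on success the caller owns dp->mutex and must
 * release it.
 *
 *	dp = get_dp_locked(dp_idx);
 *	if (!dp)
 *		return -ENODEV;
 *	...
 *	mutex_unlock(&dp->mutex);
 */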

static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table,
					 lockdep_is_held(&dp->mutex));
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
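
/* Note: br_nlmsg_size() must account for every attribute that
 * dp_fill_ifinfo() below can emit; dp_ifinfo_notify() treats -EMSGSIZE
 * from dp_fill_ifinfo() as a bug in this size estimate. */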

static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (ifindex < 0)
		return ifindex;

	if (iflink < 0)
		return iflink;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		vport_get_ifindex(rtnl_dereference(dp->ports[ODPP_LOCAL])));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}

static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);

	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct odp_port internal_dev_port;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
		if (retval < 0) {
			err = -EFAULT;
			goto err;
		} else if (retval >= IFNAMSIZ) {
			err = -ENAMETOOLONG;
			goto err;
		}
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
	strcpy(internal_dev_port.devname, devname);
	strcpy(internal_dev_port.type, "internal");
	err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
	if (err) {
		if (err == -EBUSY)
			err = -EEXIST;
		goto err_destroy_table;
	}

	dp->drop_frags = 0;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

err_destroy_local_port:
	dp_detach_port(dp->ports[ODPP_LOCAL]);
err_destroy_table:
	tbl_destroy(dp->table, NULL);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}

static void do_destroy_dp(struct datapath *dp)
{
	struct vport *p, *n;
	int i;

	list_for_each_entry_safe (p, n, &dp->port_list, node)
		if (p->port_no != ODPP_LOCAL)
			dp_detach_port(p);

	dp_sysfs_del_dp(dp);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	dp_detach_port(dp->ports[ODPP_LOCAL]);
	tbl_destroy(dp->table, flow_free_tbl);

	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	return err;
}

/* Called with RTNL lock and dp_mutex. */
static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
{
	struct vport_parms parms;
	struct vport *vport;

	parms.name = odp_port->devname;
	parms.type = odp_port->type;
	parms.config = odp_port->config;
	parms.dp = dp;
	parms.port_no = port_no;

	vport = vport_add(&parms);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	rcu_assign_pointer(dp->ports[port_no], vport);
	list_add_rcu(&vport->node, &dp->port_list);
	dp->n_ports++;

	dp_ifinfo_notify(RTM_NEWLINK, vport);

	return 0;
}

static int attach_port(int dp_idx, struct odp_port __user *portp)
{
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';
	port.type[VPORT_TYPE_SIZE - 1] = '\0';

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
		if (!dp->ports[port_no])
			goto got_port_no;
	err = -EFBIG;
	goto out_unlock_dp;

got_port_no:
	err = new_vport(dp, &port, port_no);
	if (err)
		goto out_unlock_dp;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(dp->ports[port_no]);

	err = put_user(port_no, &portp->port);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

int dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	p->dp->n_ports--;
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	return vport_del(p);
}

static int detach_port(int dp_idx, int port_no)
{
	struct vport *p;
	struct datapath *dp;
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_detach_port(p);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct odp_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			dp_output_control(dp, skb, _ODPL_MISS_NR,
					  (__force u64)OVS_CB(skb)->tun_id);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}
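
/* The per-CPU counters above are guarded by a seqcount so the fast-path
 * 64-bit increment stays cheap; get_dp_stats() is the matching reader,
 * retrying its snapshot until the sequence number is stable (sketch):
 *
 *	do {
 *		seqcount = read_seqcount_begin(&percpu_stats->seqlock);
 *		local_stats = *percpu_stats;
 *	} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
 */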

/* Append each packet in 'skb' list to 'queue'.  There will be only one packet
 * unless we broke up a GSO packet. */
static int queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
				 int queue_no, u64 arg)
{
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_msg *header;

		nskb = skb->next;
		skb->next = NULL;

		err = skb_cow(skb, sizeof *header);
		if (err)
			goto err_kfree_skbs;

		header = (struct odp_msg *)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);
	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}
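
/* What userspace sees on the read side, for each queued packet, is the
 * struct odp_msg header built above followed by the packet bytes (sketch
 * of the layout):
 *
 *	+---------------------------+------------------+
 *	| odp_msg                   | packet data      |
 *	| .type .length .port .arg  | (skb contents)   |
 *	+---------------------------+------------------+
 */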

int dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		      u64 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR && queue_no != _ODPL_SFLOW_NR);
	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	err = queue_control_packets(skb, queue, queue_no, arg);
	wake_up_interruptible(&dp->waitqueue);
	return err;

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}

static int flush_flows(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_create(TBL_MIN_BUCKETS);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	return 0;
}

static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_UNSPEC:
			return -EINVAL;

		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;

		default:
			return -EOPNOTSUPP;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}
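
/* Each action is a netlink attribute, and action_lens[] above pins the
 * exact payload size per type: for example a well-formed ODPAT_OUTPUT
 * attribute carries a 4-byte port number, which the ODPAT_OUTPUT case then
 * bounds-checks against DP_MAX_PORTS. */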

static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->actions_len);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)flow->actions,
			   flow->actions_len))
		goto error_free_actions;
	error = validate_actions(actions->actions, actions->actions_len);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}

static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used) {
		struct timespec offset_ts, used, now_mono;

		ktime_get_ts(&now_mono);
		jiffies_to_timespec(jiffies - flow->used, &offset_ts);
		set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
					now_mono.tv_nsec - offset_ts.tv_nsec);

		stats->used_sec = used.tv_sec;
		stats->used_nsec = used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}

	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->error = 0;
	stats->tcp_flags = flow->tcp_flags;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}
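
/* expand_table() and flush_flows() share the same RCU pattern: publish the
 * new table with rcu_assign_pointer(), then defer destruction of the old
 * one so that readers still inside rcu_read_lock() (e.g.
 * dp_process_received_packet()) can finish their lookups safely. */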

static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
		       struct odp_flow_stats *stats)
{
	struct tbl_node *flow_node;
	struct sw_flow *flow;
	struct tbl *table;
	int error;
	u32 hash;

	hash = flow_hash(&uf->flow.key);
	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &uf->flow.key, hash, flow_cmp);
	if (!flow_node) {
		/* No such flow. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf->flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error;
		}
		flow->key = uf->flow.key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf->flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, hash);
		if (error)
			goto error_free_flow_acts;

		memset(stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts, *new_acts;

		flow = flow_cast(flow_node);

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf->flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf->flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;

		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_is_held(&dp->mutex));
		if (old_acts->actions_len != new_acts->actions_len ||
		    memcmp(old_acts->actions, new_acts->actions,
			   old_acts->actions_len)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_bh(&flow->lock);
		get_stats(flow, stats);
		if (uf->flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_bh(&flow->lock);
	}

	return 0;

error_free_flow_acts:
	kfree(flow->sf_acts);
error_free_flow:
	flow->sf_acts = NULL;
	flow_put(flow);
error:
	return error;
}

static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		return -EFAULT;

	error = do_put_flow(dp, &uf, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int do_answer_query(struct datapath *dp, struct sw_flow *flow,
			   u32 query_flags,
			   struct odp_flow_stats __user *ustats,
			   struct nlattr __user *actions,
			   u32 __user *actions_lenp)
{
	struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	u32 actions_len;

	spin_lock_bh(&flow->lock);
	get_stats(flow, &stats);
	if (query_flags & ODPFF_ZERO_TCP_FLAGS)
		flow->tcp_flags = 0;
	spin_unlock_bh(&flow->lock);

	if (copy_to_user(ustats, &stats, sizeof(struct odp_flow_stats)) ||
	    get_user(actions_len, actions_lenp))
		return -EFAULT;

	if (actions_len) {
		sf_acts = rcu_dereference_protected(flow->sf_acts,
						    lockdep_is_held(&dp->mutex));
		if (put_user(sf_acts->actions_len, actions_lenp) ||
		    (actions && copy_to_user(actions, sf_acts->actions,
					     min(sf_acts->actions_len, actions_len))))
			return -EFAULT;
	}

	return 0;
}

static int answer_query(struct datapath *dp, struct sw_flow *flow,
			u32 query_flags, struct odp_flow __user *ufp)
{
	struct nlattr __user *actions;

	if (get_user(actions, (struct nlattr __user * __user *)&ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags,
			       &ufp->stats, actions, &ufp->actions_len);
}

static struct sw_flow *do_del_flow(struct datapath *dp, struct odp_flow_key *key)
{
	struct tbl *table = get_table_protected(dp);
	struct tbl_node *flow_node;
	int error;

	flow_node = tbl_lookup(table, key, flow_hash(key), flow_cmp);
	if (!flow_node)
		return ERR_PTR(-ENOENT);

	error = tbl_remove(table, flow_node);
	if (error)
		return ERR_PTR(error);

	/* XXX Returned flow_node's statistics might lose a few packets, since
	 * other CPUs can be using this flow.  We used to synchronize_rcu() to
	 * make sure that we get completely accurate stats, but that blows our
	 * performance, badly. */
	return flow_cast(flow_node);
}

static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (copy_from_user(&uf, ufp, sizeof uf))
		return -EFAULT;

	flow = do_del_flow(dp, &uf.key);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);
	return error;
}

static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = (struct odp_flow __user __force *)&flowvec->flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;

		flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = answer_query(dp, flow_cast(flow_node), uf.flags, ufp);
		if (error)
			return -EFAULT;
	}
	return flowvec->n_flows;
}

struct list_flows_cbdata {
	struct datapath *dp;
	struct odp_flow __user *uflows;
	u32 n_flows;
	u32 listed_flows;
};

static int list_flow(struct tbl_node *node, void *cbdata_)
{
	struct sw_flow *flow = flow_cast(node);
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(cbdata->dp, flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int do_list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.dp = dp;
	cbdata.uflows = (struct odp_flow __user __force *)flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;

	error = tbl_foreach(get_table_protected(dp), list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}
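
/* Return-value contract used by ODP_FLOW_GET/ODP_FLOW_LIST (sketch): a
 * negative 'retval' is an error; 'retval == n_flows' means the whole vector
 * was processed, so 0 is returned; anything smaller is written back to
 * userspace as the updated n_flows. */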

static int do_execute(struct datapath *dp, const struct odp_execute *execute)
{
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (execute->length < ETH_HLEN || execute->length > 65535)
		goto error;

	actions = flow_actions_alloc(execute->actions_len);
	if (IS_ERR(actions)) {
		err = PTR_ERR(actions);
		goto error;
	}

	err = -EFAULT;
	if (copy_from_user(actions->actions,
			   (struct nlattr __user __force *)execute->actions, execute->actions_len))
		goto error_free_actions;

	err = validate_actions(actions->actions, execute->actions_len);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute->length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute->length),
			   (const void __user __force *)execute->data,
			   execute->length))
		goto error_free_skb;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= 1536)
		skb->protocol = eth->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	err = flow_extract(skb, -1, &key, &is_frag);
	if (err)
		goto error_free_skb;

	rcu_read_lock();
	err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
	rcu_read_unlock();

	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
{
	struct odp_execute execute;

	if (copy_from_user(&execute, executep, sizeof execute))
		return -EFAULT;

	return do_execute(dp, &execute);
}

static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct tbl *table = get_table_protected(dp);
	struct odp_stats stats;
	int i;

	stats.n_flows = tbl_count(table);
	stats.cur_capacity = tbl_n_buckets(table);
	stats.max_capacity = TBL_MAX_BUCKETS;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));

		stats.n_frags += local_stats.n_frags;
		stats.n_hit += local_stats.n_hit;
		stats.n_missed += local_stats.n_missed;
		stats.n_lost += local_stats.n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}

/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	ASSERT_RTNL();

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * set. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}

/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
 * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	ASSERT_RTNL();

	mtu = dp_min_mtu(dp);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}

static int put_port(const struct vport *p, struct odp_port __user *uop)
{
	struct odp_port op;

	memset(&op, 0, sizeof op);

	rcu_read_lock();
	strncpy(op.devname, vport_get_name(p), sizeof op.devname);
	strncpy(op.type, vport_get_type(p), sizeof op.type);
	vport_get_config(p, op.config);
	rcu_read_unlock();

	op.port = p->port_no;

	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}

static int query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;

	if (port.devname[0]) {
		struct vport *vport;
		int err = 0;

		port.devname[IFNAMSIZ - 1] = '\0';

		rcu_read_lock();

		vport = vport_locate(port.devname);
		if (!vport) {
			err = -ENODEV;
			goto error_unlock;
		}
		if (vport->dp != dp) {
			err = -ENOENT;
			goto error_unlock;
		}

		port.port = vport->port_no;

error_unlock:
		rcu_read_unlock();
		if (err)
			return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
	}

	return put_port(dp->ports[port.port], uport);
}

static int do_list_ports(struct datapath *dp, struct odp_port __user *uports,
			 int n_ports)
{
	int idx = 0;

	if (n_ports) {
		struct vport *p;

		list_for_each_entry_rcu (p, &dp->port_list, node) {
			if (put_port(p, &uports[idx]))
				return -EFAULT;
			if (idx++ >= n_ports)
				break;
		}
	}
	return idx;
}
*dp
, struct odp_portvec __user
*upv
)
1337 struct odp_portvec pv
;
1340 if (copy_from_user(&pv
, upv
, sizeof pv
))
1343 retval
= do_list_ports(dp
, (struct odp_port __user __force
*)pv
.ports
,
1348 return put_user(retval
, &upv
->n_ports
);

static int get_listen_mask(const struct file *f)
{
	return (long)f->private_data;
}

static void set_listen_mask(struct file *f, int listen_mask)
{
	f->private_data = (void*)(long)listen_mask;
}
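
/* The listen mask is a per-open-file bitmask over the DP_N_QUEUES queues
 * (_ODPL_MISS_NR, _ODPL_ACTION_NR, _ODPL_SFLOW_NR), stashed directly in
 * file->private_data; openvswitch_read() and openvswitch_poll() consult it
 * to decide which queues this reader cares about. */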

static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	unsigned int sflow_probability;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		err = create_dp(dp_idx, (char __user *)argp);
		goto exit;

	case ODP_DP_DESTROY:
		err = destroy_dp(dp_idx);
		goto exit;

	case ODP_VPORT_ATTACH:
		err = attach_port(dp_idx, (struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_DETACH:
		err = get_user(port_no, (int __user *)argp);
		if (!err)
			err = detach_port(dp_idx, port_no);
		goto exit;

	case ODP_VPORT_MOD:
		err = vport_user_mod((struct odp_port __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_GET:
		err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_STATS_SET:
		err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_GET:
		err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_ETHER_SET:
		err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_GET:
		err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
		goto exit;

	case ODP_VPORT_MTU_SET:
		err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
		goto exit;
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user(get_listen_mask(f), (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		set_listen_mask(f, listeners);
		break;

	case ODP_GET_SFLOW_PROBABILITY:
		err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
		break;

	case ODP_SET_SFLOW_PROBABILITY:
		err = get_user(sflow_probability, (unsigned int __user *)argp);
		if (!err)
			dp->sflow_probability = sflow_probability;
		break;

	case ODP_VPORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_VPORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
		err = del_flow(dp, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
		err = do_flowvec_ioctl(dp, argp, do_query_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, do_list_flows);
		break;

	case ODP_EXECUTE:
		err = execute_packet(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
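
/* Userspace drives all of the above through ioctl() on the datapath
 * character device. Sketch only; the device node name below is a
 * hypothetical example, not something this file defines:
 *
 *	int fd = open("/dev/openvswitch0", O_RDWR);
 *	struct odp_stats stats;
 *	if (fd >= 0 && !ioctl(fd, ODP_DP_STATS, &stats))
 *		printf("%llu flows\n", (unsigned long long)stats.n_flows);
 */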

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;

	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_COMPAT
static int compat_list_ports(struct datapath *dp, struct compat_odp_portvec __user *upv)
{
	struct compat_odp_portvec pv;
	int retval;

	if (copy_from_user(&pv, upv, sizeof pv))
		return -EFAULT;

	retval = do_list_ports(dp, compat_ptr(pv.ports), pv.n_ports);
	if (retval < 0)
		return retval;

	return put_user(retval, &upv->n_ports);
}

static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
{
	compat_uptr_t actions;

	if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
	    __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
	    __copy_from_user(&flow->key, &compat->key, sizeof(struct odp_flow_key)) ||
	    __get_user(actions, &compat->actions) ||
	    __get_user(flow->actions_len, &compat->actions_len) ||
	    __get_user(flow->flags, &compat->flags))
		return -EFAULT;

	flow->actions = (struct nlattr __force *)compat_ptr(actions);
	return 0;
}

static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
{
	struct odp_flow_stats stats;
	struct odp_flow_put fp;
	int error;

	if (compat_get_flow(&fp.flow, &ufp->flow) ||
	    get_user(fp.flags, &ufp->flags))
		return -EFAULT;

	error = do_put_flow(dp, &fp, &stats);
	if (error)
		return error;

	if (copy_to_user(&ufp->flow.stats, &stats,
			 sizeof(struct odp_flow_stats)))
		return -EFAULT;

	return 0;
}

static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
			       u32 query_flags,
			       struct compat_odp_flow __user *ufp)
{
	compat_uptr_t actions;

	if (get_user(actions, &ufp->actions))
		return -EFAULT;

	return do_answer_query(dp, flow, query_flags, &ufp->stats,
			       compat_ptr(actions), &ufp->actions_len);
}

static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
{
	struct sw_flow *flow;
	struct odp_flow uf;
	int error;

	if (compat_get_flow(&uf, ufp))
		return -EFAULT;

	flow = do_del_flow(dp, &uf.key);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	error = compat_answer_query(dp, flow, 0, ufp);
	flow_deferred_free(flow);
	return error;
}

static int compat_query_flows(struct datapath *dp,
			      struct compat_odp_flow __user *flows,
			      u32 n_flows)
{
	struct tbl *table = get_table_protected(dp);
	u32 i;

	for (i = 0; i < n_flows; i++) {
		struct compat_odp_flow __user *ufp = &flows[i];
		struct odp_flow uf;
		struct tbl_node *flow_node;
		int error;

		if (compat_get_flow(&uf, ufp))
			return -EFAULT;

		flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
		if (!flow_node)
			error = put_user(ENOENT, &ufp->stats.error);
		else
			error = compat_answer_query(dp, flow_cast(flow_node),
						    uf.flags, ufp);
		if (error)
			return -EFAULT;
	}
	return n_flows;
}

struct compat_list_flows_cbdata {
	struct datapath *dp;
	struct compat_odp_flow __user *uflows;
	u32 n_flows;
	u32 listed_flows;
};

static int compat_list_flow(struct tbl_node *node, void *cbdata_)
{
	struct sw_flow *flow = flow_cast(node);
	struct compat_list_flows_cbdata *cbdata = cbdata_;
	struct compat_odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = compat_answer_query(cbdata->dp, flow, 0, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int compat_list_flows(struct datapath *dp,
			     struct compat_odp_flow __user *flows, u32 n_flows)
{
	struct compat_list_flows_cbdata cbdata;
	int error;

	if (!n_flows)
		return 0;

	cbdata.dp = dp;
	cbdata.uflows = flows;
	cbdata.n_flows = n_flows;
	cbdata.listed_flows = 0;

	error = tbl_foreach(get_table_protected(dp), compat_list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
				int (*function)(struct datapath *,
						struct compat_odp_flow __user *,
						u32 n_flows))
{
	struct compat_odp_flowvec __user *uflowvec;
	struct compat_odp_flow __user *flows;
	struct compat_odp_flowvec flowvec;
	int retval;

	uflowvec = compat_ptr(argp);
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
		return -EINVAL;

	flows = compat_ptr(flowvec.flows);
	if (!access_ok(VERIFY_WRITE, flows,
		       flowvec.n_flows * sizeof(struct compat_odp_flow)))
		return -EFAULT;

	retval = function(dp, flows, flowvec.n_flows);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: put_user(retval, &uflowvec->n_flows));
}

static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
{
	struct odp_execute execute;
	compat_uptr_t actions;
	compat_uptr_t data;

	if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
	    __get_user(actions, &uexecute->actions) ||
	    __get_user(execute.actions_len, &uexecute->actions_len) ||
	    __get_user(data, &uexecute->data) ||
	    __get_user(execute.length, &uexecute->length))
		return -EFAULT;

	execute.actions = (struct nlattr __force *)compat_ptr(actions);
	execute.data = (const void __force *)compat_ptr(data);

	return do_execute(dp, &execute);
}

static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int err;

	switch (cmd) {
	case ODP_DP_DESTROY:
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_CREATE:
	case ODP_VPORT_ATTACH:
	case ODP_VPORT_DETACH:
	case ODP_VPORT_MOD:
	case ODP_VPORT_MTU_SET:
	case ODP_VPORT_MTU_GET:
	case ODP_VPORT_ETHER_SET:
	case ODP_VPORT_ETHER_GET:
	case ODP_VPORT_STATS_SET:
	case ODP_VPORT_STATS_GET:
	case ODP_DP_STATS:
	case ODP_GET_DROP_FRAGS:
	case ODP_SET_DROP_FRAGS:
	case ODP_SET_LISTEN_MASK:
	case ODP_GET_LISTEN_MASK:
	case ODP_SET_SFLOW_PROBABILITY:
	case ODP_GET_SFLOW_PROBABILITY:
	case ODP_VPORT_QUERY:
		/* Ioctls that just need their pointer argument extended. */
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
	}

	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit;

	switch (cmd) {
	case ODP_VPORT_LIST32:
		err = compat_list_ports(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_PUT32:
		err = compat_put_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_DEL32:
		err = compat_del_flow(dp, compat_ptr(argp));
		break;

	case ODP_FLOW_GET32:
		err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
		break;

	case ODP_FLOW_LIST32:
		err = compat_flowvec_ioctl(dp, argp, compat_list_flows);
		break;

	case ODP_EXECUTE32:
		err = compat_execute(dp, compat_ptr(argp));
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
exit:
	return err;
}
#endif

/* Unfortunately this function is not exported so this is a verbatim copy
 * from net/core/datagram.c in 2.6.30. */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int pos = 0;
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
							frag->page_offset +
							offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list=list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

static ssize_t openvswitch_read(struct file *f, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	int listeners = get_listen_mask(f);
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	struct sk_buff *skb;
	size_t copy_bytes, tot_copy_bytes;
	int retval;

	if (!dp)
		return -ENODEV;

	retval = 0;
	if (nbytes == 0 || !listeners)
		goto error;

	for (;;) {
		int i;

		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	mutex_unlock(&dp->mutex);

	copy_bytes = tot_copy_bytes = min_t(size_t, skb->len, nbytes);

	retval = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (copy_bytes == skb->len) {
			__wsum csum = 0;
			u16 csum_start, csum_offset;

			get_skb_csum_pointers(skb, &csum_start, &csum_offset);
			csum_start -= skb_headroom(skb);

			BUG_ON(csum_start >= skb_headlen(skb));
			retval = skb_copy_and_csum_datagram(skb, csum_start, buf + csum_start,
							    copy_bytes - csum_start, &csum);
			if (!retval) {
				__sum16 __user *csump;

				copy_bytes = csum_start;
				csump = (__sum16 __user *)(buf + csum_start + csum_offset);

				BUG_ON((char __user *)csump + sizeof(__sum16) >
				       buf + nbytes);
				put_user(csum_fold(csum), csump);
			}
		} else
			retval = skb_checksum_help(skb);
	}

	if (!retval) {
		struct iovec iov;

		iov.iov_base = buf;
		iov.iov_len = copy_bytes;
		retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	}

	if (!retval)
		retval = tot_copy_bytes;

	kfree_skb(skb);
	return retval;

error:
	mutex_unlock(&dp->mutex);
	return retval;
}

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp_locked(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
			mask |= POLLIN | POLLRDNORM;
		mutex_unlock(&dp->mutex);
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

static struct file_operations openvswitch_fops = {
	.read		= openvswitch_read,
	.poll		= openvswitch_poll,
	.unlocked_ioctl	= openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= openvswitch_compat_ioctl,
#endif
};

static int major;

static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0)
		goto error_unreg_notifier;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");