/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009 Nicira Networks.
 */

/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/llc.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <asm/bug.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dmi.h>
#include <net/llc.h>

#include "openvswitch/datapath-protocol.h"
#include "datapath.h"
#include "actions.h"
#include "dp_dev.h"
#include "flow.h"

#include "compat.h"


int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);

int (*dp_add_dp_hook)(struct datapath *dp);
EXPORT_SYMBOL(dp_add_dp_hook);

int (*dp_del_dp_hook)(struct datapath *dp);
EXPORT_SYMBOL(dp_del_dp_hook);

int (*dp_add_if_hook)(struct net_bridge_port *p);
EXPORT_SYMBOL(dp_add_if_hook);

int (*dp_del_if_hook)(struct net_bridge_port *p);
EXPORT_SYMBOL(dp_del_if_hook);

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.  dp_mutex is almost completely redundant with genl_mutex
 * maintained by the Generic Netlink code, but the timeout path needs mutual
 * exclusion too.
 *
 * dp_mutex nests inside the RTNL lock: if you need both you must take the
 * RTNL lock first.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * dp_mutex.
 */
static struct datapath *dps[ODP_MAX];
static DEFINE_MUTEX(dp_mutex);

/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

static int new_nbp(struct datapath *, struct net_device *, int port_no);

/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ODP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
EXPORT_SYMBOL_GPL(get_dp);

struct datapath *get_dp_locked(int dp_idx)
{
	struct datapath *dp;

	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	if (dp)
		mutex_lock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	return dp;
}
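
/* Illustrative usage sketch for get_dp_locked(), not extra functionality:
 * on a non-NULL return the caller holds dp->mutex and must release it.
 * A hypothetical caller therefore looks like:
 *
 *	struct datapath *dp = get_dp_locked(dp_idx);
 *	if (!dp)
 *		return -ENODEV;
 *	...use dp...
 *	mutex_unlock(&dp->mutex);
 */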

static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}

static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  int event, unsigned int flags)
{
	const struct datapath *dp = port->dp;
	const struct net_device *dev = port->dev;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_MASTER, dp->ports[ODPP_LOCAL]->dev->ifindex);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
#endif

	if (dev->addr_len)
		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void dp_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net = dev_net(port->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
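
/* Descriptive note: the two functions above broadcast port additions and
 * removals as AF_BRIDGE RTM_NEWLINK/RTM_DELLINK messages on RTNLGRP_LINK,
 * so tools subscribed to link notifications (e.g. "ip monitor link") can
 * track datapath membership without polling. */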

static int create_dp(int dp_idx, const char __user *devnamep)
{
	struct net_device *dp_dev;
	char devname[IFNAMSIZ];
	struct datapath *dp;
	int err;
	int i;

	if (devnamep) {
		err = -EFAULT;
		if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
			goto err;
		devname[IFNAMSIZ - 1] = '\0';
	} else {
		snprintf(devname, sizeof devname, "of%d", dp_idx);
	}

	rtnl_lock();
	mutex_lock(&dp_mutex);
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock;

	/* Exit early if a datapath with that number already exists.
	 * (We don't use -EEXIST because that's ambiguous with 'devname'
	 * conflicting with an existing network device name.) */
	err = -EBUSY;
	if (get_dp(dp_idx))
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;

	mutex_init(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Set up our datapath device. */
	dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
	err = PTR_ERR(dp_dev);
	if (IS_ERR(dp_dev))
		goto err_free_dp;

	err = -ENOMEM;
	rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
	if (!dp->table)
		goto err_destroy_dp_dev;
	INIT_LIST_HEAD(&dp->port_list);

	err = new_nbp(dp, dp_dev, ODPP_LOCAL);
	if (err)
		goto err_destroy_table;

	dp->drop_frags = 0;
	err = -ENOMEM;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		goto err_destroy_local_port;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	if (dp_add_dp_hook)
		dp_add_dp_hook(dp);

	return 0;

err_destroy_local_port:
	dp_del_port(dp->ports[ODPP_LOCAL], NULL);
err_destroy_table:
	dp_table_destroy(dp->table, 0);
err_destroy_dp_dev:
	dp_dev_destroy(dp_dev);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err:
	return err;
}
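
/* Hypothetical userspace sketch of datapath creation.  The device node path
 * is an assumption (whatever node userspace creates for this chrdev's minor
 * numbers); the ioctl name comes from the dispatch table below.
 *
 *	int fd = open("/dev/net/dp0", O_RDWR);		// minor 0 => dp_idx 0
 *	if (ioctl(fd, ODP_DP_CREATE, "of0") < 0)	// name is optional
 *		perror("ODP_DP_CREATE");
 *
 * -EBUSY reports that the datapath number is already taken; a clash between
 * the requested name and an existing network device surfaces separately
 * from dp_dev_create(). */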

static void do_destroy_dp(struct datapath *dp, struct list_head *dp_devs)
{
	struct net_bridge_port *p, *n;
	int i;

	if (dp_del_dp_hook)
		dp_del_dp_hook(dp);

	/* Drop references to DP. */
	list_for_each_entry_safe (p, n, &dp->port_list, node)
		dp_del_port(p, dp_devs);

	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	synchronize_rcu();

	/* Wait until no longer in use, then destroy it. */
	synchronize_rcu();
	dp_table_destroy(dp->table, 1);
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_purge(&dp->queues[i]);
	for (i = 0; i < DP_MAX_GROUPS; i++)
		kfree(dp->groups[i]);
	free_percpu(dp->stats_percpu);
	kfree(dp);
	module_put(THIS_MODULE);
}

static int destroy_dp(int dp_idx)
{
	struct dp_dev *dp_dev, *next;
	struct datapath *dp;
	LIST_HEAD(dp_devs);
	int err;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	dp = get_dp(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	do_destroy_dp(dp, &dp_devs);
	err = 0;

err_unlock:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
	list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
		free_netdev(dp_dev->dev);
	return err;
}

/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
	struct net_bridge_port *p;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	dev_set_promiscuity(dev, 1);
	dev_hold(dev);
	p->port_no = port_no;
	p->dp = dp;
	p->dev = dev;
	if (!is_dp_dev(dev))
		rcu_assign_pointer(dev->br_port, p);
	else {
		/* It would make sense to assign dev->br_port here too, but
		 * that causes packets received on internal ports to get
		 * caught in dp_frame_hook().  dp_frame_hook() could in turn
		 * reject them back to the network stack, but that would be a
		 * waste of time. */
	}
	rcu_assign_pointer(dp->ports[port_no], p);
	list_add_rcu(&p->node, &dp->port_list);
	dp->n_ports++;

	dp_ifinfo_notify(RTM_NEWLINK, p);

	return 0;
}

static int add_port(int dp_idx, struct odp_port __user *portp)
{
	struct net_device *dev;
	struct datapath *dp;
	struct odp_port port;
	int port_no;
	int err;

	err = -EFAULT;
	if (copy_from_user(&port, portp, sizeof port))
		goto out;
	port.devname[IFNAMSIZ - 1] = '\0';
	port_no = port.port;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	err = -EEXIST;
	if (dp->ports[port_no])
		goto out_unlock_dp;

	if (!(port.flags & ODP_PORT_INTERNAL)) {
		err = -ENODEV;
		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			goto out_unlock_dp;

		err = -EINVAL;
		if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER ||
		    is_dp_dev(dev))
			goto out_put;
	} else {
		dev = dp_dev_create(dp, port.devname, port_no);
		err = PTR_ERR(dev);
		if (IS_ERR(dev))
			goto out_unlock_dp;
		dev_hold(dev);
	}

	err = new_nbp(dp, dev, port_no);
	if (err)
		goto out_put;

	if (dp_add_if_hook)
		dp_add_if_hook(dp->ports[port_no]);

out_put:
	dev_put(dev);
out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	return err;
}

int dp_del_port(struct net_bridge_port *p, struct list_head *dp_devs)
{
	ASSERT_RTNL();

#ifdef SUPPORT_SYSFS
	if (p->port_no != ODPP_LOCAL && dp_del_if_hook)
		sysfs_remove_link(&p->dp->ifobj, p->dev->name);
#endif
	dp_ifinfo_notify(RTM_DELLINK, p);

	p->dp->n_ports--;

	if (is_dp_dev(p->dev)) {
		/* Make sure that no packets arrive from now on, since
		 * dp_dev_xmit() will try to find itself through
		 * p->dp->ports[], and we're about to set that to null. */
		netif_tx_disable(p->dev);
	}

	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	if (is_dp_dev(p->dev)) {
		dp_dev_destroy(p->dev);
		if (dp_devs) {
			struct dp_dev *dp_dev = dp_dev_priv(p->dev);
			list_add(&dp_dev->list, dp_devs);
		}
	}
	if (p->port_no != ODPP_LOCAL && dp_del_if_hook) {
		dp_del_if_hook(p);
	} else {
		dev_put(p->dev);
		kfree(p);
	}

	return 0;
}

static int del_port(int dp_idx, int port_no)
{
	struct dp_dev *dp_dev, *next;
	struct net_bridge_port *p;
	struct datapath *dp;
	LIST_HEAD(dp_devs);
	int err;

	err = -EINVAL;
	if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
		goto out;

	rtnl_lock();
	dp = get_dp_locked(dp_idx);
	err = -ENODEV;
	if (!dp)
		goto out_unlock_rtnl;

	p = dp->ports[port_no];
	err = -ENOENT;
	if (!p)
		goto out_unlock_dp;

	err = dp_del_port(p, &dp_devs);

out_unlock_dp:
	mutex_unlock(&dp->mutex);
out_unlock_rtnl:
	rtnl_unlock();
out:
	list_for_each_entry_safe (dp_dev, next, &dp_devs, list)
		free_netdev(dp_dev->dev);
	return err;
}

/* Must be called with rcu_read_lock. */
static void
do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	dp_process_received_packet(skb, p);
}

/* Must be called with rcu_read_lock and with bottom-halves disabled. */
void dp_process_received_packet(struct sk_buff *skb, struct net_bridge_port *p)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	struct odp_flow_key key;
	struct sw_flow *flow;

	WARN_ON_ONCE(skb_shared(skb));
	WARN_ON_ONCE(skb->destructor);

	/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
	stats = percpu_ptr(dp->stats_percpu, smp_processor_id());

	if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
		if (dp->drop_frags) {
			kfree_skb(skb);
			stats->n_frags++;
			return;
		}
	}

	flow = dp_table_lookup(rcu_dereference(dp->table), &key);
	if (flow) {
		struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
		flow_used(flow, skb);
		execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
				GFP_ATOMIC);
		stats->n_hit++;
	} else {
		stats->n_missed++;
		dp_output_control(dp, skb, _ODPL_MISS_NR, 0);
	}
}
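
/* The receive fast path above, in outline (a sketch of the code, not an
 * addition to it):
 *
 *	flow_extract(skb, port, &key);		// parse L2-L4 headers
 *	flow = dp_table_lookup(table, &key);	// exact-match lookup
 *	if (flow)
 *		execute_actions(...);		// forward inside the kernel
 *	else
 *		dp_output_control(...);		// queue the miss to userspace
 *
 * Userspace is expected to answer a miss by installing a flow with
 * ODP_FLOW_PUT, so later packets of the same flow never leave the kernel. */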

/*
 * Used as br_handle_frame_hook.  (We cannot run the bridge module at the
 * same time, even on a different set of devices!)
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock and bottom-halves disabled. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	do_port_input(p, skb);
	return NULL;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Called with rcu_read_lock and bottom-halves disabled. */
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	do_port_input(p, *pskb);
	return 1;
}
#else
#error "Unsupported kernel version"
#endif

#ifdef CONFIG_XEN
/* This code is copied verbatim from net/dev/core.c in Xen's
 * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644.  We can't call those functions
 * directly because they aren't exported. */
static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
{
	if (ptr < (void *)skb->tail)
		return 1;
	if (__pskb_pull_tail(skb,
			     ptr - (void *)skb->data - skb_headlen(skb))) {
		return 1;
	} else {
		return 0;
	}
}

int skb_checksum_setup(struct sk_buff *skb)
{
	if (skb->proto_csum_blank) {
		if (skb->protocol != htons(ETH_P_IP))
			goto out;
		if (!skb_pull_up_to(skb, skb->nh.iph + 1))
			goto out;
		skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
		switch (skb->nh.iph->protocol) {
		case IPPROTO_TCP:
			skb->csum = offsetof(struct tcphdr, check);
			break;
		case IPPROTO_UDP:
			skb->csum = offsetof(struct udphdr, check);
			break;
		default:
			if (net_ratelimit())
				printk(KERN_ERR "Attempting to checksum a non-"
				       "TCP/UDP packet, dropping a protocol"
				       " %d packet\n", skb->nh.iph->protocol);
			goto out;
		}
		if (!skb_pull_up_to(skb, skb->h.raw + skb->csum + 2))
			goto out;
		skb->ip_summed = CHECKSUM_HW;
		skb->proto_csum_blank = 0;
	}
	return 0;
out:
	return -EPROTO;
}
#endif

int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
		  u32 arg)
{
	struct dp_stats_percpu *stats;
	struct sk_buff_head *queue;
	int port_no;
	int err;

	WARN_ON_ONCE(skb_shared(skb));
	BUG_ON(queue_no != _ODPL_MISS_NR && queue_no != _ODPL_ACTION_NR);

	queue = &dp->queues[queue_no];
	err = -ENOBUFS;
	if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
		goto err_kfree_skb;

	/* If a checksum-deferred packet is forwarded to the controller,
	 * correct the pointers and checksum.  This happens on a regular
	 * basis only on Xen (the CHECKSUM_HW case), on which VMs can pass up
	 * packets that do not have their checksum computed.  We also
	 * implement it for the non-Xen case, but it is difficult to trigger
	 * or test this case there, hence the WARN_ON_ONCE().
	 */
	err = skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;
#ifndef CHECKSUM_HW
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		WARN_ON_ONCE(1);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
		/* Until 2.6.22, the start of the transport header was also
		 * the start of data to be checksummed.  Linux 2.6.22
		 * introduced the csum_start field for this purpose, but we
		 * should point the transport header to it anyway for
		 * backward compatibility, as dev_queue_xmit() does even in
		 * 2.6.28. */
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
#endif
		err = skb_checksum_help(skb);
		if (err)
			goto err_kfree_skb;
	}
#else
	if (skb->ip_summed == CHECKSUM_HW) {
		err = skb_checksum_help(skb, 0);
		if (err)
			goto err_kfree_skb;
	}
#endif

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, 0);
		if (nskb) {
			kfree_skb(skb);
			skb = nskb;
			if (unlikely(IS_ERR(skb))) {
				err = PTR_ERR(skb);
				goto err;
			}
		} else {
			/* XXX This case might not be possible.  It's hard to
			 * tell from the skb_gso_segment() code and comment. */
		}
	}

	/* Figure out port number. */
	port_no = ODPP_LOCAL;
	if (skb->dev) {
		if (skb->dev->br_port)
			port_no = skb->dev->br_port->port_no;
		else if (is_dp_dev(skb->dev))
			port_no = dp_dev_priv(skb->dev)->port_no;
	}

	/* Append each packet to queue.  There will be only one packet unless
	 * we broke up a GSO packet above. */
	do {
		struct odp_msg *header;
		struct sk_buff *nskb = skb->next;
		skb->next = NULL;

		err = skb_cow(skb, sizeof *header);
		if (err) {
			while (nskb) {
				kfree_skb(skb);
				skb = nskb;
				nskb = skb->next;
			}
			goto err_kfree_skb;
		}

		header = (struct odp_msg*)__skb_push(skb, sizeof *header);
		header->type = queue_no;
		header->length = skb->len;
		header->port = port_no;
		header->reserved = 0;
		header->arg = arg;
		skb_queue_tail(queue, skb);

		skb = nskb;
	} while (skb);

	wake_up_interruptible(&dp->waitqueue);
	return 0;

err_kfree_skb:
	kfree_skb(skb);
err:
	stats = percpu_ptr(dp->stats_percpu, get_cpu());
	stats->n_lost++;
	put_cpu();

	return err;
}
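
/* Each packet queued above reaches userspace prefixed by a struct odp_msg,
 * filled in exactly as the loop does it: type (_ODPL_MISS_NR or
 * _ODPL_ACTION_NR), length (header plus packet data), port (input port
 * number), reserved (zero), and arg.  The authoritative layout lives in
 * openvswitch/datapath-protocol.h; this note is only a reading aid for the
 * code above. */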

static int flush_flows(struct datapath *dp)
{
	dp->n_flows = 0;
	return dp_table_flush(dp);
}

static int validate_actions(const struct sw_flow_actions *actions)
{
	unsigned int i;

	for (i = 0; i < actions->n_actions; i++) {
		const union odp_action *a = &actions->actions[i];
		switch (a->type) {
		case ODPAT_OUTPUT:
			if (a->output.port >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_OUTPUT_GROUP:
			if (a->output_group.group >= DP_MAX_GROUPS)
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_VID:
			if (a->vlan_vid.vlan_vid & htons(~VLAN_VID_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_VLAN_PCP:
			if (a->vlan_pcp.vlan_pcp & ~VLAN_PCP_MASK)
				return -EINVAL;
			break;

		default:
			if (a->type >= ODPAT_N_ACTIONS)
				return -EOPNOTSUPP;
			break;
		}
	}

	return 0;
}
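
/* For illustration, a hypothetical action list that tags traffic with VLAN
 * 10 and outputs it on port 2 would satisfy the checks above.  Member names
 * follow union odp_action in datapath-protocol.h; treat this as a sketch,
 * not a verbatim userspace program:
 *
 *	union odp_action acts[2];
 *	memset(acts, 0, sizeof acts);
 *	acts[0].vlan_vid.type = ODPAT_SET_VLAN_VID;
 *	acts[0].vlan_vid.vlan_vid = htons(10);
 *	acts[1].output.type = ODPAT_OUTPUT;
 *	acts[1].output.port = 2;
 */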

static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
{
	struct sw_flow_actions *actions;
	int error;

	actions = flow_actions_alloc(flow->n_actions);
	error = PTR_ERR(actions);
	if (IS_ERR(actions))
		goto error;

	error = -EFAULT;
	if (copy_from_user(actions->actions, flow->actions,
			   flow->n_actions * sizeof(union odp_action)))
		goto error_free_actions;
	error = validate_actions(actions);
	if (error)
		goto error_free_actions;

	return actions;

error_free_actions:
	kfree(actions);
error:
	return ERR_PTR(error);
}

static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
{
	if (flow->used.tv_sec) {
		stats->used_sec = flow->used.tv_sec;
		stats->used_nsec = flow->used.tv_nsec;
	} else {
		stats->used_sec = 0;
		stats->used_nsec = 0;
	}
	stats->n_packets = flow->packet_count;
	stats->n_bytes = flow->byte_count;
	stats->ip_tos = flow->ip_tos;
	stats->tcp_flags = flow->tcp_flags;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used.tv_sec = flow->used.tv_nsec = 0;
	flow->tcp_flags = 0;
	flow->ip_tos = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}

static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
	struct odp_flow_put uf;
	struct sw_flow *flow, **bucket;
	struct dp_table *table;
	struct odp_flow_stats stats;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
		goto error;
	uf.flow.key.reserved = 0;

retry:
	table = rcu_dereference(dp->table);
	bucket = dp_table_lookup_for_insert(table, &uf.flow.key);
	if (!bucket) {
		/* No such flow, and the slots where it could go are full. */
		error = uf.flags & ODPPF_CREATE ? -EXFULL : -ENOENT;
		goto error;
	} else if (!*bucket) {
		/* No such flow, but we found an available slot for it. */
		struct sw_flow_actions *acts;

		error = -ENOENT;
		if (!(uf.flags & ODPPF_CREATE))
			goto error;

		/* Expand table, if necessary, to make room. */
		if (dp->n_flows * 4 >= table->n_buckets &&
		    table->n_buckets < DP_MAX_BUCKETS) {
			error = dp_table_expand(dp);
			if (error)
				goto error;

			/* The bucket's location has changed.  Try again. */
			goto retry;
		}

		/* Allocate flow. */
		error = -ENOMEM;
		flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
		if (flow == NULL)
			goto error;
		flow->key = uf.flow.key;
		spin_lock_init(&flow->lock);
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&uf.flow);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		/* Put flow in bucket. */
		rcu_assign_pointer(*bucket, flow);
		dp->n_flows++;
		memset(&stats, 0, sizeof(struct odp_flow_stats));
	} else {
		/* We found a matching flow. */
		struct sw_flow *flow = *rcu_dereference(bucket);
		struct sw_flow_actions *old_acts, *new_acts;
		unsigned long int flags;

		/* Bail out if we're not allowed to modify an existing flow. */
		error = -EEXIST;
		if (!(uf.flags & ODPPF_MODIFY))
			goto error;

		/* Swap actions. */
		new_acts = get_actions(&uf.flow);
		error = PTR_ERR(new_acts);
		if (IS_ERR(new_acts))
			goto error;
		old_acts = rcu_dereference(flow->sf_acts);
		if (old_acts->n_actions != new_acts->n_actions ||
		    memcmp(old_acts->actions, new_acts->actions,
			   sizeof(union odp_action) * old_acts->n_actions)) {
			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		} else {
			kfree(new_acts);
		}

		/* Fetch stats, then clear them if necessary. */
		spin_lock_irqsave(&flow->lock, flags);
		get_stats(flow, &stats);
		if (uf.flags & ODPPF_ZERO_STATS)
			clear_stats(flow);
		spin_unlock_irqrestore(&flow->lock, flags);
	}

	/* Copy stats to userspace. */
	if (__copy_to_user(&ufp->flow.stats, &stats,
			   sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return 0;

error_free_flow:
	kmem_cache_free(flow_cache, flow);
error:
	return error;
}
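
/* Flag semantics implemented by put_flow() above, summarized from the code:
 *
 *	ODPPF_CREATE alone:	insert a new flow; fails with -EEXIST if the
 *				flow exists, -EXFULL if every candidate
 *				bucket is occupied.
 *	ODPPF_MODIFY alone:	replace the actions of an existing flow;
 *				fails with -ENOENT if there is none.
 *	ODPPF_ZERO_STATS:	after reporting the old statistics to the
 *				caller, reset the flow's counters. */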

static int put_actions(const struct sw_flow *flow, struct odp_flow __user *ufp)
{
	union odp_action __user *actions;
	struct sw_flow_actions *sf_acts;
	u32 n_actions;

	if (__get_user(actions, &ufp->actions) ||
	    __get_user(n_actions, &ufp->n_actions))
		return -EFAULT;

	if (!n_actions)
		return 0;
	if (n_actions > INT_MAX / sizeof(union odp_action))
		return -EINVAL;

	sf_acts = rcu_dereference(flow->sf_acts);
	if (__put_user(sf_acts->n_actions, &ufp->n_actions) ||
	    (actions && copy_to_user(actions, sf_acts->actions,
				     sizeof(union odp_action) *
				     min(sf_acts->n_actions, n_actions))))
		return -EFAULT;

	return 0;
}

static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp)
{
	struct odp_flow_stats stats;
	unsigned long int flags;

	spin_lock_irqsave(&flow->lock, flags);
	get_stats(flow, &stats);
	spin_unlock_irqrestore(&flow->lock, flags);

	if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
		return -EFAULT;
	return put_actions(flow, ufp);
}

static int del_or_query_flow(struct datapath *dp,
			     struct odp_flow __user *ufp,
			     unsigned int cmd)
{
	struct dp_table *table = rcu_dereference(dp->table);
	struct odp_flow uf;
	struct sw_flow *flow;
	int error;

	error = -EFAULT;
	if (copy_from_user(&uf, ufp, sizeof uf))
		goto error;
	uf.key.reserved = 0;

	flow = dp_table_lookup(table, &uf.key);
	error = -ENOENT;
	if (!flow)
		goto error;

	if (cmd == ODP_FLOW_DEL) {
		/* XXX redundant lookup */
		error = dp_table_delete(table, flow);
		if (error)
			goto error;

		/* XXX These statistics might lose a few packets, since other
		 * CPUs can be using this flow.  We used to synchronize_rcu()
		 * to make sure that we get completely accurate stats, but
		 * that blows our performance, badly. */
		dp->n_flows--;
		error = answer_query(flow, ufp);
		flow_deferred_free(flow);
	} else {
		error = answer_query(flow, ufp);
	}

error:
	return error;
}

static int query_multiple_flows(struct datapath *dp,
				const struct odp_flowvec *flowvec)
{
	struct dp_table *table = rcu_dereference(dp->table);
	int i;
	for (i = 0; i < flowvec->n_flows; i++) {
		struct odp_flow __user *ufp = &flowvec->flows[i];
		struct odp_flow uf;
		struct sw_flow *flow;
		int error;

		if (__copy_from_user(&uf, ufp, sizeof uf))
			return -EFAULT;
		uf.key.reserved = 0;

		flow = dp_table_lookup(table, &uf.key);
		if (!flow)
			error = __clear_user(&ufp->stats, sizeof ufp->stats);
		else
			error = answer_query(flow, ufp);
		if (error)
			return -EFAULT;
	}
	return flowvec->n_flows;
}

struct list_flows_cbdata {
	struct odp_flow __user *uflows;
	int n_flows;
	int listed_flows;
};

static int list_flow(struct sw_flow *flow, void *cbdata_)
{
	struct list_flows_cbdata *cbdata = cbdata_;
	struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
	int error;

	if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
		return -EFAULT;
	error = answer_query(flow, ufp);
	if (error)
		return error;

	if (cbdata->listed_flows >= cbdata->n_flows)
		return cbdata->listed_flows;
	return 0;
}

static int list_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
	struct list_flows_cbdata cbdata;
	int error;

	if (!flowvec->n_flows)
		return 0;

	cbdata.uflows = flowvec->flows;
	cbdata.n_flows = flowvec->n_flows;
	cbdata.listed_flows = 0;
	error = dp_table_foreach(rcu_dereference(dp->table),
				 list_flow, &cbdata);
	return error ? error : cbdata.listed_flows;
}

static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
			    int (*function)(struct datapath *,
					    const struct odp_flowvec *))
{
	struct odp_flowvec __user *uflowvec;
	struct odp_flowvec flowvec;
	int retval;

	uflowvec = (struct odp_flowvec __user *)argp;
	if (!access_ok(VERIFY_WRITE, uflowvec, sizeof *uflowvec) ||
	    copy_from_user(&flowvec, uflowvec, sizeof flowvec))
		return -EFAULT;

	if (flowvec.n_flows > INT_MAX / sizeof(struct odp_flow))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, flowvec.flows,
		       flowvec.n_flows * sizeof(struct odp_flow)))
		return -EFAULT;

	retval = function(dp, &flowvec);
	return (retval < 0 ? retval
		: retval == flowvec.n_flows ? 0
		: __put_user(retval, &uflowvec->n_flows));
}
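
/* Shared convention of the flowvec helpers above: a negative return is an
 * error and a non-negative return is the number of flows processed.
 * do_flowvec_ioctl() translates this for userspace by writing a short count
 * back into uflowvec->n_flows, so callers can size their next request
 * without a separate "out" parameter. */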

static int do_execute(struct datapath *dp, const struct odp_execute *executep)
{
	struct odp_execute execute;
	struct odp_flow_key key;
	struct sk_buff *skb;
	struct sw_flow_actions *actions;
	int err;

	err = -EFAULT;
	if (copy_from_user(&execute, executep, sizeof execute))
		goto error;

	err = -EINVAL;
	if (execute.length < ETH_HLEN || execute.length > 65535)
		goto error;
	if (execute.n_actions > INT_MAX / sizeof(union odp_action))
		goto error;

	err = -ENOMEM;
	actions = flow_actions_alloc(execute.n_actions);
	if (!actions)
		goto error;

	err = -EFAULT;
	if (copy_from_user(actions->actions, execute.actions,
			   execute.n_actions * sizeof *execute.actions))
		goto error_free_actions;

	err = validate_actions(actions);
	if (err)
		goto error_free_actions;

	err = -ENOMEM;
	skb = alloc_skb(execute.length, GFP_KERNEL);
	if (!skb)
		goto error_free_actions;
	if (execute.in_port < DP_MAX_PORTS) {
		struct net_bridge_port *p = dp->ports[execute.in_port];
		if (p)
			skb->dev = p->dev;
	}

	err = -EFAULT;
	if (copy_from_user(skb_put(skb, execute.length), execute.data,
			   execute.length))
		goto error_free_skb;

	flow_extract(skb, execute.in_port, &key);
	err = execute_actions(dp, skb, &key, actions->actions,
			      actions->n_actions, GFP_KERNEL);
	kfree(actions);
	return err;

error_free_skb:
	kfree_skb(skb);
error_free_actions:
	kfree(actions);
error:
	return err;
}

static int
get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
	struct odp_stats stats;
	int i;

	stats.n_flows = dp->n_flows;
	stats.cur_capacity = rcu_dereference(dp->table)->n_buckets * 2;
	stats.max_capacity = DP_MAX_BUCKETS * 2;
	stats.n_ports = dp->n_ports;
	stats.max_ports = DP_MAX_PORTS;
	stats.max_groups = DP_MAX_GROUPS;
	stats.n_frags = stats.n_hit = stats.n_missed = stats.n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *s;
		s = percpu_ptr(dp->stats_percpu, i);
		stats.n_frags += s->n_frags;
		stats.n_hit += s->n_hit;
		stats.n_missed += s->n_missed;
		stats.n_lost += s->n_lost;
	}
	stats.max_miss_queue = DP_MAX_QUEUE_LEN;
	stats.max_action_queue = DP_MAX_QUEUE_LEN;
	return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}

static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{
	struct odp_port op;
	memset(&op, 0, sizeof op);
	strncpy(op.devname, p->dev->name, sizeof op.devname);
	op.port = p->port_no;
	op.flags = is_dp_dev(p->dev) ? ODP_PORT_INTERNAL : 0;
	return copy_to_user(uop, &op, sizeof op) ? -EFAULT : 0;
}

static int
query_port(struct datapath *dp, struct odp_port __user *uport)
{
	struct odp_port port;

	if (copy_from_user(&port, uport, sizeof port))
		return -EFAULT;
	if (port.devname[0]) {
		struct net_bridge_port *p;
		struct net_device *dev;
		int err;

		port.devname[IFNAMSIZ - 1] = '\0';

		dev = dev_get_by_name(&init_net, port.devname);
		if (!dev)
			return -ENODEV;

		p = dev->br_port;
		if (!p && is_dp_dev(dev)) {
			struct dp_dev *dp_dev = dp_dev_priv(dev);
			if (dp_dev->dp == dp)
				p = dp->ports[dp_dev->port_no];
		}
		err = p && p->dp == dp ? put_port(p, uport) : -ENOENT;
		dev_put(dev);

		return err;
	} else {
		if (port.port >= DP_MAX_PORTS)
			return -EINVAL;
		if (!dp->ports[port.port])
			return -ENOENT;
		return put_port(dp->ports[port.port], uport);
	}
}

static int
list_ports(struct datapath *dp, struct odp_portvec __user *pvp)
{
	struct odp_portvec pv;
	struct net_bridge_port *p;
	int idx;

	if (copy_from_user(&pv, pvp, sizeof pv))
		return -EFAULT;

	idx = 0;
	if (pv.n_ports) {
		list_for_each_entry_rcu (p, &dp->port_list, node) {
			if (idx >= pv.n_ports)
				break;
			if (put_port(p, &pv.ports[idx]))
				return -EFAULT;
			idx++;
		}
	}
	return put_user(idx, &pvp->n_ports);
}

/* RCU callback for freeing a dp_port_group */
static void free_port_group(struct rcu_head *rcu)
{
	struct dp_port_group *g = container_of(rcu, struct dp_port_group, rcu);
	kfree(g);
}

static int
set_port_group(struct datapath *dp, const struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *new_group, *old_group;
	int error;

	error = -EFAULT;
	if (copy_from_user(&pg, upg, sizeof pg))
		goto error;

	error = -EINVAL;
	if (pg.n_ports > DP_MAX_PORTS || pg.group >= DP_MAX_GROUPS)
		goto error;

	error = -ENOMEM;
	new_group = kmalloc(sizeof *new_group + sizeof(u16) * pg.n_ports,
			    GFP_KERNEL);
	if (!new_group)
		goto error;

	new_group->n_ports = pg.n_ports;
	error = -EFAULT;
	if (copy_from_user(new_group->ports, pg.ports,
			   sizeof(u16) * pg.n_ports))
		goto error_free;

	old_group = rcu_dereference(dp->groups[pg.group]);
	rcu_assign_pointer(dp->groups[pg.group], new_group);
	if (old_group)
		call_rcu(&old_group->rcu, free_port_group);
	return 0;

error_free:
	kfree(new_group);
error:
	return error;
}

static int
get_port_group(struct datapath *dp, struct odp_port_group __user *upg)
{
	struct odp_port_group pg;
	struct dp_port_group *g;
	u16 n_copy;

	if (copy_from_user(&pg, upg, sizeof pg))
		return -EFAULT;

	if (pg.group >= DP_MAX_GROUPS)
		return -EINVAL;

	g = dp->groups[pg.group];
	n_copy = g ? min_t(int, g->n_ports, pg.n_ports) : 0;
	if (n_copy && copy_to_user(pg.ports, g->ports, n_copy * sizeof(u16)))
		return -EFAULT;

	if (put_user(g ? g->n_ports : 0, &upg->n_ports))
		return -EFAULT;

	return 0;
}

static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp;
	int drop_frags, listeners, port_no;
	int err;

	/* Handle commands with special locking requirements up front. */
	switch (cmd) {
	case ODP_DP_CREATE:
		return create_dp(dp_idx, (char __user *)argp);

	case ODP_DP_DESTROY:
		return destroy_dp(dp_idx);

	case ODP_PORT_ADD:
		return add_port(dp_idx, (struct odp_port __user *)argp);

	case ODP_PORT_DEL:
		err = get_user(port_no, (int __user *)argp);
		if (err)
			return err;
		return del_port(dp_idx, port_no);
	}

	dp = get_dp_locked(dp_idx);
	if (!dp)
		return -ENODEV;

	switch (cmd) {
	case ODP_DP_STATS:
		err = get_dp_stats(dp, (struct odp_stats __user *)argp);
		break;

	case ODP_GET_DROP_FRAGS:
		err = put_user(dp->drop_frags, (int __user *)argp);
		break;

	case ODP_SET_DROP_FRAGS:
		err = get_user(drop_frags, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (drop_frags != 0 && drop_frags != 1)
			break;
		dp->drop_frags = drop_frags;
		err = 0;
		break;

	case ODP_GET_LISTEN_MASK:
		err = put_user((int)f->private_data, (int __user *)argp);
		break;

	case ODP_SET_LISTEN_MASK:
		err = get_user(listeners, (int __user *)argp);
		if (err)
			break;
		err = -EINVAL;
		if (listeners & ~ODPL_ALL)
			break;
		err = 0;
		f->private_data = (void*)listeners;
		break;

	case ODP_PORT_QUERY:
		err = query_port(dp, (struct odp_port __user *)argp);
		break;

	case ODP_PORT_LIST:
		err = list_ports(dp, (struct odp_portvec __user *)argp);
		break;

	case ODP_PORT_GROUP_SET:
		err = set_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_PORT_GROUP_GET:
		err = get_port_group(dp, (struct odp_port_group __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(dp);
		break;

	case ODP_FLOW_PUT:
		err = put_flow(dp, (struct odp_flow_put __user *)argp);
		break;

	case ODP_FLOW_DEL:
	case ODP_FLOW_GET:
		err = del_or_query_flow(dp, (struct odp_flow __user *)argp,
					cmd);
		break;

	case ODP_FLOW_GET_MULTIPLE:
		err = do_flowvec_ioctl(dp, argp, query_multiple_flows);
		break;

	case ODP_FLOW_LIST:
		err = do_flowvec_ioctl(dp, argp, list_flows);
		break;

	case ODP_EXECUTE:
		err = do_execute(dp, (struct odp_execute __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&dp->mutex);
	return err;
}
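
/* Hypothetical userspace sketch of the per-fd listen mask (the device node
 * path is an assumption; ODPL_MISS and ODPL_ACTION are the listen flags that
 * datapath-protocol.h pairs with _ODPL_MISS_NR and _ODPL_ACTION_NR):
 *
 *	int fd = open("/dev/net/dp0", O_RDWR);
 *	int mask = ODPL_MISS | ODPL_ACTION;	// subscribe to both queues
 *	ioctl(fd, ODP_SET_LISTEN_MASK, &mask);
 *	// read() on fd now yields queued odp_msg packets.
 *
 * The mask is stored in f->private_data, so each open file descriptor
 * subscribes independently. */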

static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
{
	int i;
	for (i = 0; i < DP_N_QUEUES; i++) {
		if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
			return 1;
	}
	return 0;
}

ssize_t openvswitch_read(struct file *f, char __user *buf, size_t nbytes,
			 loff_t *ppos)
{
	int listeners = (int) f->private_data;
	int dp_idx = iminor(f->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	struct sk_buff *skb;
	struct iovec iov;
	size_t copy_bytes;
	int retval;

	if (!dp)
		return -ENODEV;

	if (nbytes == 0 || !listeners)
		return 0;

	for (;;) {
		int i;

		for (i = 0; i < DP_N_QUEUES; i++) {
			if (listeners & (1 << i)) {
				skb = skb_dequeue(&dp->queues[i]);
				if (skb)
					goto success;
			}
		}

		if (f->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto error;
		}

		wait_event_interruptible(dp->waitqueue,
					 dp_has_packet_of_interest(dp,
								   listeners));

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto error;
		}
	}
success:
	copy_bytes = min_t(size_t, skb->len, nbytes);
	iov.iov_base = buf;
	iov.iov_len = copy_bytes;
	retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
	if (!retval)
		retval = copy_bytes;
	kfree_skb(skb);

error:
	return retval;
}
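
/* Note on the read() semantics above: each call consumes at most one queued
 * message (odp_msg header plus packet data), and a buffer shorter than the
 * message silently truncates it; the remainder is freed with the skb.  No
 * message boundary carries over between calls, so callers should size their
 * buffer for the largest message they expect. */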

static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
{
	int dp_idx = iminor(file->f_dentry->d_inode);
	struct datapath *dp = get_dp(dp_idx);
	unsigned int mask;

	if (dp) {
		mask = 0;
		poll_wait(file, &dp->waitqueue, wait);
		if (dp_has_packet_of_interest(dp, (int)file->private_data))
			mask |= POLLIN | POLLRDNORM;
	} else {
		mask = POLLIN | POLLRDNORM | POLLHUP;
	}
	return mask;
}

struct file_operations openvswitch_fops = {
	/* XXX .aio_read = openvswitch_aio_read, */
	.read = openvswitch_read,
	.poll = openvswitch_poll,
	.unlocked_ioctl = openvswitch_ioctl,
	/* XXX .fasync = openvswitch_fasync, */
};

static int major;
static struct llc_sap *dp_stp_sap;

static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* We don't really care about STP packets, we just listen for them
	 * for mutual exclusion with the bridge module, so this just discards
	 * them. */
	kfree_skb(skb);
	return 0;
}

static int __init dp_init(void)
{
	int err;

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	/* Register to receive STP packets because the bridge module also
	 * attempts to do so.  Since there can only be a single listener for
	 * a given protocol, this provides mutual exclusion against the
	 * bridge module, preventing both of them from being loaded at the
	 * same time. */
	dp_stp_sap = llc_sap_open(LLC_SAP_BSPAN, dp_stp_rcv);
	if (!dp_stp_sap) {
		printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
		return -EADDRINUSE;
	}

	err = flow_init();
	if (err)
		goto error;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_flow_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	if (major < 0) {
		err = major;
		goto error_unreg_notifier;
	}

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_flow_exit:
	flow_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	rcu_barrier();
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	flow_exit();
	br_handle_frame_hook = NULL;
	llc_sap_put(dp_stp_sap);
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");