1 | /*
2 | * Copyright (c) 2007-2012 Nicira, Inc.
3 | *
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of version 2 of the GNU General Public | |
6 | * License as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License | |
14 | * along with this program; if not, write to the Free Software | |
15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | |
16 | * 02110-1301, USA | |
17 | */ | |
18 | ||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
20 | ||
21 | #include <linux/init.h> | |
22 | #include <linux/module.h> | |
23 | #include <linux/if_arp.h> | |
24 | #include <linux/if_vlan.h> | |
25 | #include <linux/in.h> | |
26 | #include <linux/ip.h> | |
27 | #include <linux/jhash.h> | |
28 | #include <linux/delay.h> | |
29 | #include <linux/time.h> | |
30 | #include <linux/etherdevice.h> | |
31 | #include <linux/genetlink.h> | |
32 | #include <linux/kernel.h> | |
33 | #include <linux/kthread.h> | |
34 | #include <linux/mutex.h> | |
35 | #include <linux/percpu.h> | |
36 | #include <linux/rcupdate.h> | |
37 | #include <linux/tcp.h> | |
38 | #include <linux/udp.h> | |
39 | #include <linux/ethtool.h> |
40 | #include <linux/wait.h> | |
41 | #include <asm/div64.h> |
42 | #include <linux/highmem.h> | |
43 | #include <linux/netfilter_bridge.h> | |
44 | #include <linux/netfilter_ipv4.h> | |
45 | #include <linux/inetdevice.h> | |
46 | #include <linux/list.h> | |
47 | #include <linux/openvswitch.h> | |
48 | #include <linux/rculist.h> | |
49 | #include <linux/dmi.h> | |
50 | #include <linux/workqueue.h> | |
51 | #include <net/genetlink.h> | |
52 | #include <net/net_namespace.h> |
53 | #include <net/netns/generic.h> | |
54 | |
55 | #include "datapath.h" | |
56 | #include "flow.h" | |
57 | #include "vport-internal_dev.h" | |
58 | ||
59 | /** |
60 | * struct ovs_net - Per net-namespace data for ovs. | |
61 | * @dps: List of datapaths to enable dumping them all out. | |
62 | * Protected by genl_mutex. | |
63 | */ | |
64 | struct ovs_net { | |
65 | struct list_head dps; | |
66 | }; | |
67 | ||
68 | static int ovs_net_id __read_mostly; | |
69 | ||
70 | #define REHASH_FLOW_INTERVAL (10 * 60 * HZ) | |
71 | static void rehash_flow_table(struct work_struct *work); | |
72 | static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table); | |
73 | ||
74 | /** |
75 | * DOC: Locking: | |
76 | * | |
77 | * Writes to device state (add/remove datapath, port, set operations on vports, | |
78 | * etc.) are protected by RTNL. | |
79 | * | |
80 | * Writes to other state (flow table modifications, set miscellaneous datapath | |
81 | * parameters, etc.) are protected by genl_mutex. The RTNL lock nests inside | |
82 | * genl_mutex. | |
83 | * | |
84 | * Reads are protected by RCU. | |
85 | * | |
86 | * There are a few special cases (mostly stats) that have their own | |
87 | * synchronization but they nest under all of above and don't interact with | |
88 | * each other. | |
89 | */ | |
90 | ||
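/*
 * Illustrative sketch of the read side described above (not a definitive
 * pattern from this file): a reader takes rcu_read_lock(), resolves the
 * datapath, and uses it only inside the critical section.  Writers take
 * rtnl_lock() or hold genl_mutex instead.
 *
 *	rcu_read_lock();
 *	dp = get_dp(net, dp_ifindex);
 *	if (dp)
 *		... read-only access to dp ...
 *	rcu_read_unlock();
 */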
91 | static struct vport *new_vport(const struct vport_parms *);
92 | static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
93 | const struct dp_upcall_info *);
94 | static int queue_userspace_packet(struct net *, int dp_ifindex,
95 | struct sk_buff *,
96 | const struct dp_upcall_info *);
97 | ||
98 | /* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */ | |
99 | static struct datapath *get_dp(struct net *net, int dp_ifindex)
100 | {
101 | struct datapath *dp = NULL; | |
102 | struct net_device *dev; | |
103 | ||
104 | rcu_read_lock(); | |
105 | dev = dev_get_by_index_rcu(net, dp_ifindex);
106 | if (dev) {
107 | struct vport *vport = ovs_internal_dev_get_vport(dev); | |
108 | if (vport) | |
109 | dp = vport->dp; | |
110 | } | |
111 | rcu_read_unlock(); | |
112 | ||
113 | return dp; | |
114 | } | |
115 | ||
116 | /* Must be called with rcu_read_lock or RTNL lock. */ | |
117 | const char *ovs_dp_name(const struct datapath *dp) | |
118 | { | |
119 | struct vport *vport = ovs_vport_rtnl_rcu(dp, OVSP_LOCAL);
120 | return vport->ops->get_name(vport);
121 | } | |
122 | ||
123 | static int get_dpifindex(struct datapath *dp) | |
124 | { | |
125 | struct vport *local; | |
126 | int ifindex; | |
127 | ||
128 | rcu_read_lock(); | |
129 | ||
130 | local = ovs_vport_rcu(dp, OVSP_LOCAL);
131 | if (local)
132 | ifindex = local->ops->get_ifindex(local); | |
133 | else | |
134 | ifindex = 0; | |
135 | ||
136 | rcu_read_unlock(); | |
137 | ||
138 | return ifindex; | |
139 | } | |
140 | ||
141 | static void destroy_dp_rcu(struct rcu_head *rcu) | |
142 | { | |
143 | struct datapath *dp = container_of(rcu, struct datapath, rcu); | |
144 | ||
145 | ovs_flow_tbl_destroy((__force struct flow_table *)dp->table); | |
146 | free_percpu(dp->stats_percpu); | |
147 | release_net(ovs_dp_get_net(dp));
148 | kfree(dp->ports);
149 | kfree(dp);
150 | } | |
151 | ||
152 | static struct hlist_head *vport_hash_bucket(const struct datapath *dp, |
153 | u16 port_no) | |
154 | { | |
155 | return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)]; | |
156 | } | |
157 | ||
158 | struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no) | |
159 | { | |
160 | struct vport *vport; | |
161 | struct hlist_head *head; |
162 | ||
163 | head = vport_hash_bucket(dp, port_no); | |
164 | hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
165 | if (vport->port_no == port_no)
166 | return vport; | |
167 | } | |
168 | return NULL; | |
169 | } | |
170 | ||
171 | /* Called with RTNL lock and genl_lock. */ |
172 | static struct vport *new_vport(const struct vport_parms *parms) | |
173 | { | |
174 | struct vport *vport; | |
175 | ||
176 | vport = ovs_vport_add(parms); | |
177 | if (!IS_ERR(vport)) { | |
178 | struct datapath *dp = parms->dp; | |
179 | struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
180 |
181 | hlist_add_head_rcu(&vport->dp_hash_node, head);
182 | }
183 | ||
184 | return vport; | |
185 | } | |
186 | ||
187 | /* Called with RTNL lock. */ | |
188 | void ovs_dp_detach_port(struct vport *p) | |
189 | { | |
190 | ASSERT_RTNL(); | |
191 | ||
192 | /* First drop references to device. */ | |
193 | hlist_del_rcu(&p->dp_hash_node);
194 |
195 | /* Then destroy it. */ | |
196 | ovs_vport_del(p); | |
197 | } | |
198 | ||
199 | /* Must be called with rcu_read_lock. */ | |
200 | void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) | |
201 | { | |
202 | struct datapath *dp = p->dp; | |
203 | struct sw_flow *flow; | |
204 | struct dp_stats_percpu *stats; | |
205 | struct sw_flow_key key; | |
206 | u64 *stats_counter; | |
207 | int error; | |
208 | int key_len; | |
209 | ||
210 | stats = this_cpu_ptr(dp->stats_percpu);
211 |
212 | /* Extract flow from 'skb' into 'key'. */ | |
213 | error = ovs_flow_extract(skb, p->port_no, &key, &key_len); | |
214 | if (unlikely(error)) { | |
215 | kfree_skb(skb); | |
216 | return; | |
217 | } | |
218 | ||
219 | /* Look up flow. */ | |
220 | flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len); | |
221 | if (unlikely(!flow)) { | |
222 | struct dp_upcall_info upcall; | |
223 | ||
224 | upcall.cmd = OVS_PACKET_CMD_MISS; | |
225 | upcall.key = &key; | |
226 | upcall.userdata = NULL; | |
227 | upcall.portid = p->upcall_portid;
228 | ovs_dp_upcall(dp, skb, &upcall);
229 | consume_skb(skb); | |
230 | stats_counter = &stats->n_missed; | |
231 | goto out; | |
232 | } | |
233 | ||
234 | OVS_CB(skb)->flow = flow; | |
235 | ||
236 | stats_counter = &stats->n_hit; | |
237 | ovs_flow_used(OVS_CB(skb)->flow, skb); | |
238 | ovs_execute_actions(dp, skb); | |
239 | ||
240 | out: | |
241 | /* Update datapath statistics. */ | |
242 | u64_stats_update_begin(&stats->sync); | |
243 | (*stats_counter)++; | |
244 | u64_stats_update_end(&stats->sync); | |
245 | } | |
246 | ||
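/*
 * Receive path summary: ovs_dp_process_received_packet() extracts a flow
 * key from the skb, looks it up in the datapath's flow table, and either
 * executes the matching flow's actions or, on a miss, queues the packet to
 * userspace with OVS_PACKET_CMD_MISS.  Hit, miss and lost counts live in
 * the per-CPU stats updated under u64_stats_update_begin/end.
 */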
247 | static struct genl_family dp_packet_genl_family = { | |
248 | .id = GENL_ID_GENERATE, | |
249 | .hdrsize = sizeof(struct ovs_header), | |
250 | .name = OVS_PACKET_FAMILY, | |
251 | .version = OVS_PACKET_VERSION, | |
252 | .maxattr = OVS_PACKET_ATTR_MAX, |
253 | .netnsok = true | |
254 | }; |
255 | ||
256 | int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, | |
257 | const struct dp_upcall_info *upcall_info)
258 | {
259 | struct dp_stats_percpu *stats; | |
260 | int dp_ifindex; | |
261 | int err; | |
262 | ||
263 | if (upcall_info->portid == 0) {
264 | err = -ENOTCONN;
265 | goto err; | |
266 | } | |
267 | ||
268 | dp_ifindex = get_dpifindex(dp); | |
269 | if (!dp_ifindex) { | |
270 | err = -ENODEV; | |
271 | goto err; | |
272 | } | |
273 | ||
274 | if (!skb_is_gso(skb)) | |
275 | err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
276 | else
277 | err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
278 | if (err)
279 | goto err; | |
280 | ||
281 | return 0; | |
282 | ||
283 | err: | |
284 | stats = this_cpu_ptr(dp->stats_percpu);
285 |
286 | u64_stats_update_begin(&stats->sync); | |
287 | stats->n_lost++; | |
288 | u64_stats_update_end(&stats->sync); | |
289 | ||
290 | return err; | |
291 | } | |
292 | ||
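/*
 * ovs_dp_upcall() above is the single entry point for sending packets to
 * userspace: non-GSO skbs go straight to queue_userspace_packet(), GSO skbs
 * are first segmented by queue_gso_packets(); any failure is counted in the
 * per-CPU n_lost statistic.
 */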
293 | static int queue_gso_packets(struct net *net, int dp_ifindex, |
294 | struct sk_buff *skb, | |
295 | const struct dp_upcall_info *upcall_info) |
296 | { | |
297 | unsigned short gso_type = skb_shinfo(skb)->gso_type;
298 | struct dp_upcall_info later_info;
299 | struct sw_flow_key later_key; | |
300 | struct sk_buff *segs, *nskb; | |
301 | int err; | |
302 | ||
303 | segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
304 | if (IS_ERR(segs))
305 | return PTR_ERR(segs);
306 |
307 | /* Queue all of the segments. */ | |
308 | skb = segs; | |
309 | do { | |
310 | err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
311 | if (err)
312 | break; | |
313 | ||
314 | if (skb == segs && gso_type & SKB_GSO_UDP) {
315 | /* The initial flow key extracted by ovs_flow_extract()
316 | * in this case is for a first fragment, so we need to | |
317 | * properly mark later fragments. | |
318 | */ | |
319 | later_key = *upcall_info->key; | |
320 | later_key.ip.frag = OVS_FRAG_TYPE_LATER; | |
321 | ||
322 | later_info = *upcall_info; | |
323 | later_info.key = &later_key; | |
324 | upcall_info = &later_info; | |
325 | } | |
326 | } while ((skb = skb->next)); | |
327 | ||
328 | /* Free all of the segments. */ | |
329 | skb = segs; | |
330 | do { | |
331 | nskb = skb->next; | |
332 | if (err) | |
333 | kfree_skb(skb); | |
334 | else | |
335 | consume_skb(skb); | |
336 | } while ((skb = nskb)); | |
337 | return err; | |
338 | } | |
339 | ||
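/*
 * Note on the loop above: each GSO segment is queued as a separate upcall.
 * For UDP fragmentation, only the first segment carries the key extracted
 * by ovs_flow_extract(); later segments are reported with ip.frag set to
 * OVS_FRAG_TYPE_LATER so userspace sees them as later fragments.
 */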
340 | static int queue_userspace_packet(struct net *net, int dp_ifindex, |
341 | struct sk_buff *skb, | |
342 | const struct dp_upcall_info *upcall_info) |
343 | { | |
344 | struct ovs_header *upcall; | |
345 | struct sk_buff *nskb = NULL; | |
346 | struct sk_buff *user_skb; /* to be queued to userspace */ | |
347 | struct nlattr *nla; | |
348 | unsigned int len; | |
349 | int err; | |
350 | ||
351 | if (vlan_tx_tag_present(skb)) { | |
352 | nskb = skb_clone(skb, GFP_ATOMIC); | |
353 | if (!nskb) | |
354 | return -ENOMEM; | |
355 | ||
356 | nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); | |
357 | if (!nskb)
358 | return -ENOMEM;
359 | ||
360 | nskb->vlan_tci = 0; | |
361 | skb = nskb; | |
362 | } | |
363 | ||
364 | if (nla_attr_size(skb->len) > USHRT_MAX) { | |
365 | err = -EFBIG; | |
366 | goto out; | |
367 | } | |
368 | ||
369 | len = sizeof(struct ovs_header); | |
370 | len += nla_total_size(skb->len); | |
371 | len += nla_total_size(FLOW_BUFSIZE); | |
372 | if (upcall_info->userdata) |
373 | len += NLA_ALIGN(upcall_info->userdata->nla_len); | |
374 | |
375 | user_skb = genlmsg_new(len, GFP_ATOMIC); | |
376 | if (!user_skb) { | |
377 | err = -ENOMEM; | |
378 | goto out; | |
379 | } | |
380 | ||
381 | upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, | |
382 | 0, upcall_info->cmd); | |
383 | upcall->dp_ifindex = dp_ifindex; | |
384 | ||
385 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); | |
386 | ovs_flow_to_nlattrs(upcall_info->key, user_skb); | |
387 | nla_nest_end(user_skb, nla); | |
388 | ||
389 | if (upcall_info->userdata) | |
390 | __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA, |
391 | nla_len(upcall_info->userdata), | |
392 | nla_data(upcall_info->userdata)); | |
393 | |
394 | nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); | |
395 | ||
396 | skb_copy_and_csum_dev(skb, nla_data(nla)); | |
397 | ||
398 | genlmsg_end(user_skb, upcall);
399 | err = genlmsg_unicast(net, user_skb, upcall_info->portid);
400 |
401 | out: | |
402 | kfree_skb(nskb); | |
403 | return err; | |
404 | } | |
405 | ||
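/*
 * Layout of the Netlink message built by queue_userspace_packet(): an
 * ovs_header carrying dp_ifindex, a nested OVS_PACKET_ATTR_KEY with the
 * flow key, an optional OVS_PACKET_ATTR_USERDATA copied from the action,
 * and OVS_PACKET_ATTR_PACKET holding the packet data itself.
 */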
406 | /* Called with genl_mutex. */ | |
407 | static int flush_flows(struct datapath *dp)
408 | {
409 | struct flow_table *old_table; | |
410 | struct flow_table *new_table; | |
411 | |
412 | old_table = genl_dereference(dp->table); | |
413 | new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS); | |
414 | if (!new_table) | |
415 | return -ENOMEM; | |
416 | ||
417 | rcu_assign_pointer(dp->table, new_table); | |
418 | ||
419 | ovs_flow_tbl_deferred_destroy(old_table); | |
420 | return 0; | |
421 | } | |
422 | ||
423 | static int validate_actions(const struct nlattr *attr, | |
424 | const struct sw_flow_key *key, int depth); | |
425 | ||
426 | static int validate_sample(const struct nlattr *attr, | |
427 | const struct sw_flow_key *key, int depth) | |
428 | { | |
429 | const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; | |
430 | const struct nlattr *probability, *actions; | |
431 | const struct nlattr *a; | |
432 | int rem; | |
433 | ||
434 | memset(attrs, 0, sizeof(attrs)); | |
435 | nla_for_each_nested(a, attr, rem) { | |
436 | int type = nla_type(a); | |
437 | if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) | |
438 | return -EINVAL; | |
439 | attrs[type] = a; | |
440 | } | |
441 | if (rem) | |
442 | return -EINVAL; | |
443 | ||
444 | probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; | |
445 | if (!probability || nla_len(probability) != sizeof(u32)) | |
446 | return -EINVAL; | |
447 | ||
448 | actions = attrs[OVS_SAMPLE_ATTR_ACTIONS]; | |
449 | if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) | |
450 | return -EINVAL; | |
451 | return validate_actions(actions, key, depth + 1); | |
452 | } | |
453 | ||
454 | static int validate_tp_port(const struct sw_flow_key *flow_key) |
455 | { | |
456 | if (flow_key->eth.type == htons(ETH_P_IP)) { | |
457 | if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
458 | return 0;
459 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { | |
460 | if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
461 | return 0;
462 | } | |
463 | ||
464 | return -EINVAL; | |
465 | } | |
466 | ||
467 | static int validate_set(const struct nlattr *a, |
468 | const struct sw_flow_key *flow_key) | |
469 | { | |
470 | const struct nlattr *ovs_key = nla_data(a); | |
471 | int key_type = nla_type(ovs_key); | |
472 | ||
473 | /* There can be only one key in an action */
474 | if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) | |
475 | return -EINVAL; | |
476 | ||
477 | if (key_type > OVS_KEY_ATTR_MAX || | |
478 | nla_len(ovs_key) != ovs_key_lens[key_type]) | |
479 | return -EINVAL; | |
480 | ||
481 | switch (key_type) { | |
482 | const struct ovs_key_ipv4 *ipv4_key; | |
483 | const struct ovs_key_ipv6 *ipv6_key;
484 |
485 | case OVS_KEY_ATTR_PRIORITY: | |
486 | case OVS_KEY_ATTR_SKB_MARK:
487 | case OVS_KEY_ATTR_ETHERNET:
488 | break; | |
489 | ||
490 | case OVS_KEY_ATTR_IPV4: | |
491 | if (flow_key->eth.type != htons(ETH_P_IP)) | |
492 | return -EINVAL; | |
493 | ||
494 | if (!flow_key->ip.proto)
495 | return -EINVAL;
496 | ||
497 | ipv4_key = nla_data(ovs_key); | |
498 | if (ipv4_key->ipv4_proto != flow_key->ip.proto) | |
499 | return -EINVAL; | |
500 | ||
501 | if (ipv4_key->ipv4_frag != flow_key->ip.frag) | |
502 | return -EINVAL; | |
503 | ||
504 | break; | |
505 | ||
506 | case OVS_KEY_ATTR_IPV6: |
507 | if (flow_key->eth.type != htons(ETH_P_IPV6)) | |
508 | return -EINVAL; | |
509 | ||
510 | if (!flow_key->ip.proto) | |
511 | return -EINVAL; | |
512 | ||
513 | ipv6_key = nla_data(ovs_key); | |
514 | if (ipv6_key->ipv6_proto != flow_key->ip.proto) | |
515 | return -EINVAL; | |
516 | ||
517 | if (ipv6_key->ipv6_frag != flow_key->ip.frag) | |
518 | return -EINVAL; | |
519 | ||
520 | if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000) | |
521 | return -EINVAL; | |
522 | ||
523 | break; | |
524 | ||
525 | case OVS_KEY_ATTR_TCP: |
526 | if (flow_key->ip.proto != IPPROTO_TCP) | |
527 | return -EINVAL; | |
528 | ||
529 | return validate_tp_port(flow_key);
530 |
531 | case OVS_KEY_ATTR_UDP: | |
532 | if (flow_key->ip.proto != IPPROTO_UDP) | |
533 | return -EINVAL; | |
534 | ||
535 | return validate_tp_port(flow_key);
536 |
537 | default: | |
538 | return -EINVAL; | |
539 | } | |
540 | ||
541 | return 0; | |
542 | } | |
543 | ||
544 | static int validate_userspace(const struct nlattr *attr) | |
545 | { | |
546 | static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { | |
547 | [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, | |
548 | [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
549 | };
550 | struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; | |
551 | int error; | |
552 | ||
553 | error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, | |
554 | attr, userspace_policy); | |
555 | if (error) | |
556 | return error; | |
557 | ||
558 | if (!a[OVS_USERSPACE_ATTR_PID] || | |
559 | !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) | |
560 | return -EINVAL; | |
561 | ||
562 | return 0; | |
563 | } | |
564 | ||
565 | static int validate_actions(const struct nlattr *attr, | |
566 | const struct sw_flow_key *key, int depth) | |
567 | { | |
568 | const struct nlattr *a; | |
569 | int rem, err; | |
570 | ||
571 | if (depth >= SAMPLE_ACTION_DEPTH) | |
572 | return -EOVERFLOW; | |
573 | ||
574 | nla_for_each_nested(a, attr, rem) { | |
575 | /* Expected argument lengths, (u32)-1 for variable length. */ | |
576 | static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { | |
577 | [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), | |
578 | [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, | |
579 | [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), | |
580 | [OVS_ACTION_ATTR_POP_VLAN] = 0, | |
581 | [OVS_ACTION_ATTR_SET] = (u32)-1, | |
582 | [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 | |
583 | }; | |
584 | const struct ovs_action_push_vlan *vlan; | |
585 | int type = nla_type(a); | |
586 | ||
587 | if (type > OVS_ACTION_ATTR_MAX || | |
588 | (action_lens[type] != nla_len(a) && | |
589 | action_lens[type] != (u32)-1)) | |
590 | return -EINVAL; | |
591 | ||
592 | switch (type) { | |
593 | case OVS_ACTION_ATTR_UNSPEC: | |
594 | return -EINVAL; | |
595 | ||
596 | case OVS_ACTION_ATTR_USERSPACE: | |
597 | err = validate_userspace(a); | |
598 | if (err) | |
599 | return err; | |
600 | break; | |
601 | ||
602 | case OVS_ACTION_ATTR_OUTPUT: | |
603 | if (nla_get_u32(a) >= DP_MAX_PORTS) | |
604 | return -EINVAL; | |
605 | break; | |
606 | ||
607 | ||
608 | case OVS_ACTION_ATTR_POP_VLAN: | |
609 | break; | |
610 | ||
611 | case OVS_ACTION_ATTR_PUSH_VLAN: | |
612 | vlan = nla_data(a); | |
613 | if (vlan->vlan_tpid != htons(ETH_P_8021Q)) | |
614 | return -EINVAL; | |
615 | if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) | |
616 | return -EINVAL; | |
617 | break; | |
618 | ||
619 | case OVS_ACTION_ATTR_SET: | |
620 | err = validate_set(a, key); | |
621 | if (err) | |
622 | return err; | |
623 | break; | |
624 | ||
625 | case OVS_ACTION_ATTR_SAMPLE: | |
626 | err = validate_sample(a, key, depth); | |
627 | if (err) | |
628 | return err; | |
629 | break; | |
630 | ||
631 | default: | |
632 | return -EINVAL; | |
633 | } | |
634 | } | |
635 | ||
636 | if (rem > 0) | |
637 | return -EINVAL; | |
638 | ||
639 | return 0; | |
640 | } | |
641 | ||
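/*
 * validate_actions() walks the nested action list recursively; the depth
 * argument limits nesting of OVS_ACTION_ATTR_SAMPLE to SAMPLE_ACTION_DEPTH
 * so a malformed action list cannot recurse without bound.
 */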
642 | static void clear_stats(struct sw_flow *flow) | |
643 | { | |
644 | flow->used = 0; | |
645 | flow->tcp_flags = 0; | |
646 | flow->packet_count = 0; | |
647 | flow->byte_count = 0; | |
648 | } | |
649 | ||
650 | static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) | |
651 | { | |
652 | struct ovs_header *ovs_header = info->userhdr; | |
653 | struct nlattr **a = info->attrs; | |
654 | struct sw_flow_actions *acts; | |
655 | struct sk_buff *packet; | |
656 | struct sw_flow *flow; | |
657 | struct datapath *dp; | |
658 | struct ethhdr *eth; | |
659 | int len; | |
660 | int err; | |
661 | int key_len; | |
662 | ||
663 | err = -EINVAL; | |
664 | if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || | |
665 | !a[OVS_PACKET_ATTR_ACTIONS] || | |
666 | nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN) | |
667 | goto err; | |
668 | ||
669 | len = nla_len(a[OVS_PACKET_ATTR_PACKET]); | |
670 | packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL); | |
671 | err = -ENOMEM; | |
672 | if (!packet) | |
673 | goto err; | |
674 | skb_reserve(packet, NET_IP_ALIGN); | |
675 | ||
676 | memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len); | |
677 | ||
678 | skb_reset_mac_header(packet); | |
679 | eth = eth_hdr(packet); | |
680 | ||
681 | /* Normally, setting the skb 'protocol' field would be handled by a | |
682 | * call to eth_type_trans(), but it assumes there's a sending | |
683 | * device, which we may not have. */ | |
684 | if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
685 | packet->protocol = eth->h_proto;
686 | else | |
687 | packet->protocol = htons(ETH_P_802_2); | |
688 | ||
689 | /* Build an sw_flow for sending this packet. */ | |
690 | flow = ovs_flow_alloc(); | |
691 | err = PTR_ERR(flow); | |
692 | if (IS_ERR(flow)) | |
693 | goto err_kfree_skb; | |
694 | ||
695 | err = ovs_flow_extract(packet, -1, &flow->key, &key_len); | |
696 | if (err) | |
697 | goto err_flow_free; | |
698 | ||
699 | err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority, | |
700 | &flow->key.phy.skb_mark,
701 | &flow->key.phy.in_port,
702 | a[OVS_PACKET_ATTR_KEY]); | |
703 | if (err) | |
704 | goto err_flow_free; | |
705 | ||
706 | err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0); | |
707 | if (err) | |
708 | goto err_flow_free; | |
709 | ||
710 | flow->hash = ovs_flow_hash(&flow->key, key_len); | |
711 | ||
712 | acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]); | |
713 | err = PTR_ERR(acts); | |
714 | if (IS_ERR(acts)) | |
715 | goto err_flow_free; | |
716 | rcu_assign_pointer(flow->sf_acts, acts); | |
717 | ||
718 | OVS_CB(packet)->flow = flow; | |
719 | packet->priority = flow->key.phy.priority; | |
720 | packet->mark = flow->key.phy.skb_mark;
721 |
722 | rcu_read_lock(); | |
723 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
724 | err = -ENODEV;
725 | if (!dp) | |
726 | goto err_unlock; | |
727 | ||
728 | local_bh_disable(); | |
729 | err = ovs_execute_actions(dp, packet); | |
730 | local_bh_enable(); | |
731 | rcu_read_unlock(); | |
732 | ||
733 | ovs_flow_free(flow); | |
734 | return err; | |
735 | ||
736 | err_unlock: | |
737 | rcu_read_unlock(); | |
738 | err_flow_free: | |
739 | ovs_flow_free(flow); | |
740 | err_kfree_skb: | |
741 | kfree_skb(packet); | |
742 | err: | |
743 | return err; | |
744 | } | |
745 | ||
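/*
 * OVS_PACKET_CMD_EXECUTE path: the packet supplied by userspace is copied
 * into a fresh skb, a temporary sw_flow is built from the provided key and
 * actions, and the actions are executed against the current datapath with
 * bottom halves disabled; the temporary flow is freed before returning.
 */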
746 | static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { | |
747 | [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC }, | |
748 | [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, | |
749 | [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, | |
750 | }; | |
751 | ||
752 | static struct genl_ops dp_packet_genl_ops[] = { | |
753 | { .cmd = OVS_PACKET_CMD_EXECUTE, | |
754 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
755 | .policy = packet_policy, | |
756 | .doit = ovs_packet_cmd_execute | |
757 | } | |
758 | }; | |
759 | ||
760 | static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) | |
761 | { | |
762 | int i; | |
763 | struct flow_table *table = genl_dereference(dp->table); | |
764 | ||
765 | stats->n_flows = ovs_flow_tbl_count(table); | |
766 | ||
767 | stats->n_hit = stats->n_missed = stats->n_lost = 0; | |
768 | for_each_possible_cpu(i) { | |
769 | const struct dp_stats_percpu *percpu_stats; | |
770 | struct dp_stats_percpu local_stats; | |
771 | unsigned int start; | |
772 | ||
773 | percpu_stats = per_cpu_ptr(dp->stats_percpu, i); | |
774 | ||
775 | do { | |
776 | start = u64_stats_fetch_begin_bh(&percpu_stats->sync); | |
777 | local_stats = *percpu_stats; | |
778 | } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start)); | |
779 | ||
780 | stats->n_hit += local_stats.n_hit; | |
781 | stats->n_missed += local_stats.n_missed; | |
782 | stats->n_lost += local_stats.n_lost; | |
783 | } | |
784 | } | |
785 | ||
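/*
 * get_dp_stats() sums the per-CPU counters; the u64_stats_fetch_begin_bh/
 * retry loop re-reads a CPU's snapshot if a writer updated it concurrently,
 * so the 64-bit counters are read consistently even on 32-bit machines.
 */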
786 | static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { | |
787 | [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, | |
788 | [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, | |
789 | [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, | |
790 | }; | |
791 | ||
792 | static struct genl_family dp_flow_genl_family = { | |
793 | .id = GENL_ID_GENERATE, | |
794 | .hdrsize = sizeof(struct ovs_header), | |
795 | .name = OVS_FLOW_FAMILY, | |
796 | .version = OVS_FLOW_VERSION, | |
797 | .maxattr = OVS_FLOW_ATTR_MAX, |
798 | .netnsok = true | |
799 | }; |
800 | ||
801 | static struct genl_multicast_group ovs_dp_flow_multicast_group = { | |
802 | .name = OVS_FLOW_MCGROUP | |
803 | }; | |
804 | ||
805 | /* Called with genl_lock. */ | |
806 | static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |
807 | struct sk_buff *skb, u32 portid,
808 | u32 seq, u32 flags, u8 cmd)
809 | { | |
810 | const int skb_orig_len = skb->len; | |
811 | const struct sw_flow_actions *sf_acts; | |
812 | struct ovs_flow_stats stats; | |
813 | struct ovs_header *ovs_header; | |
814 | struct nlattr *nla; | |
815 | unsigned long used; | |
816 | u8 tcp_flags; | |
817 | int err; | |
818 | ||
819 | sf_acts = rcu_dereference_protected(flow->sf_acts, | |
820 | lockdep_genl_is_held()); | |
821 | ||
822 | ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
823 | if (!ovs_header)
824 | return -EMSGSIZE; | |
825 | ||
826 | ovs_header->dp_ifindex = get_dpifindex(dp); | |
827 | ||
828 | nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); | |
829 | if (!nla) | |
830 | goto nla_put_failure; | |
831 | err = ovs_flow_to_nlattrs(&flow->key, skb); | |
832 | if (err) | |
833 | goto error; | |
834 | nla_nest_end(skb, nla); | |
835 | ||
836 | spin_lock_bh(&flow->lock); | |
837 | used = flow->used; | |
838 | stats.n_packets = flow->packet_count; | |
839 | stats.n_bytes = flow->byte_count; | |
840 | tcp_flags = flow->tcp_flags; | |
841 | spin_unlock_bh(&flow->lock); | |
842 | ||
843 | if (used && |
844 | nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) | |
845 | goto nla_put_failure; | |
846 |
847 | if (stats.n_packets && |
848 | nla_put(skb, OVS_FLOW_ATTR_STATS, | |
849 | sizeof(struct ovs_flow_stats), &stats)) | |
850 | goto nla_put_failure; | |
851 |
852 | if (tcp_flags && |
853 | nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags)) | |
854 | goto nla_put_failure; | |
855 | |
856 | /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if | |
857 | * this is the first flow to be dumped into 'skb'. This is unusual for | |
858 | * Netlink but individual action lists can be longer than | |
859 | * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this. | |
860 | * The userspace caller can always fetch the actions separately if it | |
861 | * really wants them. (Most userspace callers in fact don't care.) | |
862 | * | |
863 | * This can only fail for dump operations because the skb is always | |
864 | * properly sized for single flows. | |
865 | */ | |
866 | err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len, | |
867 | sf_acts->actions); | |
868 | if (err < 0 && skb_orig_len) | |
869 | goto error; | |
870 | ||
871 | return genlmsg_end(skb, ovs_header); | |
872 | ||
873 | nla_put_failure: | |
874 | err = -EMSGSIZE; | |
875 | error: | |
876 | genlmsg_cancel(skb, ovs_header); | |
877 | return err; | |
878 | } | |
879 | ||
880 | static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow) | |
881 | { | |
882 | const struct sw_flow_actions *sf_acts; | |
883 | int len; | |
884 | ||
885 | sf_acts = rcu_dereference_protected(flow->sf_acts, | |
886 | lockdep_genl_is_held()); | |
887 | ||
888 | /* OVS_FLOW_ATTR_KEY */ | |
889 | len = nla_total_size(FLOW_BUFSIZE); | |
890 | /* OVS_FLOW_ATTR_ACTIONS */ | |
891 | len += nla_total_size(sf_acts->actions_len); | |
892 | /* OVS_FLOW_ATTR_STATS */ | |
893 | len += nla_total_size(sizeof(struct ovs_flow_stats)); | |
894 | /* OVS_FLOW_ATTR_TCP_FLAGS */ | |
895 | len += nla_total_size(1); | |
896 | /* OVS_FLOW_ATTR_USED */ | |
897 | len += nla_total_size(8); | |
898 | ||
899 | len += NLMSG_ALIGN(sizeof(struct ovs_header)); | |
900 | ||
901 | return genlmsg_new(len, GFP_KERNEL); | |
902 | } | |
903 | ||
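/*
 * The reply skb allocated above is sized for exactly one flow: key,
 * actions, stats, TCP flags and last-used time, plus the ovs_header.
 * Dump replies instead reuse the caller-provided skb and may omit the
 * actions when they do not fit (see ovs_flow_cmd_fill_info()).
 */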
904 | static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, | |
905 | struct datapath *dp, | |
906 | u32 portid, u32 seq, u8 cmd)
907 | {
908 | struct sk_buff *skb; | |
909 | int retval; | |
910 | ||
911 | skb = ovs_flow_cmd_alloc_info(flow); | |
912 | if (!skb) | |
913 | return ERR_PTR(-ENOMEM); | |
914 | ||
915 | retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
916 | BUG_ON(retval < 0);
917 | return skb; | |
918 | } | |
919 | ||
920 | static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |
921 | { | |
922 | struct nlattr **a = info->attrs; | |
923 | struct ovs_header *ovs_header = info->userhdr; | |
924 | struct sw_flow_key key; | |
925 | struct sw_flow *flow; | |
926 | struct sk_buff *reply; | |
927 | struct datapath *dp; | |
928 | struct flow_table *table; | |
929 | int error; | |
930 | int key_len; | |
931 | ||
932 | /* Extract key. */ | |
933 | error = -EINVAL; | |
934 | if (!a[OVS_FLOW_ATTR_KEY]) | |
935 | goto error; | |
936 | error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); | |
937 | if (error) | |
938 | goto error; | |
939 | ||
940 | /* Validate actions. */ | |
941 | if (a[OVS_FLOW_ATTR_ACTIONS]) { | |
942 | error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0); | |
943 | if (error) | |
944 | goto error; | |
945 | } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) { | |
946 | error = -EINVAL; | |
947 | goto error; | |
948 | } | |
949 | ||
950 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
951 | error = -ENODEV;
952 | if (!dp) | |
953 | goto error; | |
954 | ||
955 | table = genl_dereference(dp->table); | |
956 | flow = ovs_flow_tbl_lookup(table, &key, key_len); | |
957 | if (!flow) { | |
958 | struct sw_flow_actions *acts; | |
959 | ||
960 | /* Bail out if we're not allowed to create a new flow. */ | |
961 | error = -ENOENT; | |
962 | if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) | |
963 | goto error; | |
964 | ||
965 | /* Expand table, if necessary, to make room. */ | |
966 | if (ovs_flow_tbl_need_to_expand(table)) { | |
967 | struct flow_table *new_table; | |
968 | ||
969 | new_table = ovs_flow_tbl_expand(table); | |
970 | if (!IS_ERR(new_table)) { | |
971 | rcu_assign_pointer(dp->table, new_table); | |
972 | ovs_flow_tbl_deferred_destroy(table); | |
973 | table = genl_dereference(dp->table); | |
974 | } | |
975 | } | |
976 | ||
977 | /* Allocate flow. */ | |
978 | flow = ovs_flow_alloc(); | |
979 | if (IS_ERR(flow)) { | |
980 | error = PTR_ERR(flow); | |
981 | goto error; | |
982 | } | |
983 | flow->key = key; | |
984 | clear_stats(flow); | |
985 | ||
986 | /* Obtain actions. */ | |
987 | acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]); | |
988 | error = PTR_ERR(acts); | |
989 | if (IS_ERR(acts)) | |
990 | goto error_free_flow; | |
991 | rcu_assign_pointer(flow->sf_acts, acts); | |
992 | ||
993 | /* Put flow in bucket. */ | |
994 | flow->hash = ovs_flow_hash(&key, key_len); | |
995 | ovs_flow_tbl_insert(table, flow); | |
996 | ||
997 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
998 | info->snd_seq,
999 | OVS_FLOW_CMD_NEW); | |
1000 | } else { | |
1001 | /* We found a matching flow. */ | |
1002 | struct sw_flow_actions *old_acts; | |
1003 | struct nlattr *acts_attrs; | |
1004 | ||
1005 | /* Bail out if we're not allowed to modify an existing flow. | |
1006 | * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL | |
1007 | * because Generic Netlink treats the latter as a dump | |
1008 | * request. We also accept NLM_F_EXCL in case that bug ever | |
1009 | * gets fixed. | |
1010 | */ | |
1011 | error = -EEXIST; | |
1012 | if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW && | |
1013 | info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) | |
1014 | goto error; | |
1015 | ||
1016 | /* Update actions. */ | |
1017 | old_acts = rcu_dereference_protected(flow->sf_acts, | |
1018 | lockdep_genl_is_held()); | |
1019 | acts_attrs = a[OVS_FLOW_ATTR_ACTIONS]; | |
1020 | if (acts_attrs && | |
1021 | (old_acts->actions_len != nla_len(acts_attrs) || | |
1022 | memcmp(old_acts->actions, nla_data(acts_attrs), | |
1023 | old_acts->actions_len))) { | |
1024 | struct sw_flow_actions *new_acts; | |
1025 | ||
1026 | new_acts = ovs_flow_actions_alloc(acts_attrs); | |
1027 | error = PTR_ERR(new_acts); | |
1028 | if (IS_ERR(new_acts)) | |
1029 | goto error; | |
1030 | ||
1031 | rcu_assign_pointer(flow->sf_acts, new_acts); | |
1032 | ovs_flow_deferred_free_acts(old_acts); | |
1033 | } | |
1034 | ||
1035 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1036 | info->snd_seq, OVS_FLOW_CMD_NEW);
1037 | ||
1038 | /* Clear stats. */ | |
1039 | if (a[OVS_FLOW_ATTR_CLEAR]) { | |
1040 | spin_lock_bh(&flow->lock); | |
1041 | clear_stats(flow); | |
1042 | spin_unlock_bh(&flow->lock); | |
1043 | } | |
1044 | } | |
1045 | ||
1046 | if (!IS_ERR(reply)) | |
1047 | genl_notify(reply, genl_info_net(info), info->snd_portid,
1048 | ovs_dp_flow_multicast_group.id, info->nlhdr,
1049 | GFP_KERNEL); | |
1050 | else | |
1051 | netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1052 | ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
1053 | return 0; | |
1054 | ||
1055 | error_free_flow: | |
1056 | ovs_flow_free(flow); | |
1057 | error: | |
1058 | return error; | |
1059 | } | |
1060 | ||
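/*
 * ovs_flow_cmd_new_or_set() backs both OVS_FLOW_CMD_NEW and _SET: a miss in
 * the flow table creates a new flow (expanding the table first if needed),
 * while a hit swaps in the new action list via rcu_assign_pointer() and
 * frees the old actions after a grace period.  A notification is multicast
 * to the flow group either way.
 */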
1061 | static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |
1062 | { | |
1063 | struct nlattr **a = info->attrs; | |
1064 | struct ovs_header *ovs_header = info->userhdr; | |
1065 | struct sw_flow_key key; | |
1066 | struct sk_buff *reply; | |
1067 | struct sw_flow *flow; | |
1068 | struct datapath *dp; | |
1069 | struct flow_table *table; | |
1070 | int err; | |
1071 | int key_len; | |
1072 | ||
1073 | if (!a[OVS_FLOW_ATTR_KEY]) | |
1074 | return -EINVAL; | |
1075 | err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); | |
1076 | if (err) | |
1077 | return err; | |
1078 | ||
1079 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1080 | if (!dp)
1081 | return -ENODEV; | |
1082 | ||
1083 | table = genl_dereference(dp->table); | |
1084 | flow = ovs_flow_tbl_lookup(table, &key, key_len); | |
1085 | if (!flow) | |
1086 | return -ENOENT; | |
1087 | ||
1088 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
1089 | info->snd_seq, OVS_FLOW_CMD_NEW);
1090 | if (IS_ERR(reply)) | |
1091 | return PTR_ERR(reply); | |
1092 | ||
1093 | return genlmsg_reply(reply, info); | |
1094 | } | |
1095 | ||
1096 | static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |
1097 | { | |
1098 | struct nlattr **a = info->attrs; | |
1099 | struct ovs_header *ovs_header = info->userhdr; | |
1100 | struct sw_flow_key key; | |
1101 | struct sk_buff *reply; | |
1102 | struct sw_flow *flow; | |
1103 | struct datapath *dp; | |
1104 | struct flow_table *table; | |
1105 | int err; | |
1106 | int key_len; | |
1107 | ||
1108 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); |
1109 | if (!dp) | |
1110 | return -ENODEV; | |
1111 | ||
1112 | if (!a[OVS_FLOW_ATTR_KEY])
1113 | return flush_flows(dp);
1114 | ||
1115 | err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]); |
1116 | if (err) | |
1117 | return err; | |
1118 | ||
1119 | table = genl_dereference(dp->table); |
1120 | flow = ovs_flow_tbl_lookup(table, &key, key_len); | |
1121 | if (!flow) | |
1122 | return -ENOENT; | |
1123 | ||
1124 | reply = ovs_flow_cmd_alloc_info(flow); | |
1125 | if (!reply) | |
1126 | return -ENOMEM; | |
1127 | ||
1128 | ovs_flow_tbl_remove(table, flow); | |
1129 | ||
1130 | err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
1131 | info->snd_seq, 0, OVS_FLOW_CMD_DEL);
1132 | BUG_ON(err < 0); | |
1133 | ||
1134 | ovs_flow_deferred_free(flow); | |
1135 | ||
1136 | genl_notify(reply, genl_info_net(info), info->snd_portid,
1137 | ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
1138 | return 0; | |
1139 | } | |
1140 | ||
1141 | static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |
1142 | { | |
1143 | struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); | |
1144 | struct datapath *dp; | |
1145 | struct flow_table *table; | |
1146 | ||
1147 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1148 | if (!dp)
1149 | return -ENODEV; | |
1150 | ||
1151 | table = genl_dereference(dp->table); | |
1152 | ||
1153 | for (;;) { | |
1154 | struct sw_flow *flow; | |
1155 | u32 bucket, obj; | |
1156 | ||
1157 | bucket = cb->args[0]; | |
1158 | obj = cb->args[1]; | |
1159 | flow = ovs_flow_tbl_next(table, &bucket, &obj); | |
1160 | if (!flow) | |
1161 | break; | |
1162 | ||
1163 | if (ovs_flow_cmd_fill_info(flow, dp, skb, | |
1164 | NETLINK_CB(cb->skb).portid,
1165 | cb->nlh->nlmsg_seq, NLM_F_MULTI,
1166 | OVS_FLOW_CMD_NEW) < 0) | |
1167 | break; | |
1168 | ||
1169 | cb->args[0] = bucket; | |
1170 | cb->args[1] = obj; | |
1171 | } | |
1172 | return skb->len; | |
1173 | } | |
1174 | ||
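/*
 * Dump state is carried in cb->args: args[0] is the hash-table bucket and
 * args[1] the offset within it, so a multi-message dump resumes where the
 * previous skb filled up.
 */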
1175 | static struct genl_ops dp_flow_genl_ops[] = { | |
1176 | { .cmd = OVS_FLOW_CMD_NEW, | |
1177 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1178 | .policy = flow_policy, | |
1179 | .doit = ovs_flow_cmd_new_or_set | |
1180 | }, | |
1181 | { .cmd = OVS_FLOW_CMD_DEL, | |
1182 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1183 | .policy = flow_policy, | |
1184 | .doit = ovs_flow_cmd_del | |
1185 | }, | |
1186 | { .cmd = OVS_FLOW_CMD_GET, | |
1187 | .flags = 0, /* OK for unprivileged users. */ | |
1188 | .policy = flow_policy, | |
1189 | .doit = ovs_flow_cmd_get, | |
1190 | .dumpit = ovs_flow_cmd_dump | |
1191 | }, | |
1192 | { .cmd = OVS_FLOW_CMD_SET, | |
1193 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1194 | .policy = flow_policy, | |
1195 | .doit = ovs_flow_cmd_new_or_set, | |
1196 | }, | |
1197 | }; | |
1198 | ||
1199 | static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { | |
1200 | [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, | |
1201 | [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, | |
1202 | }; | |
1203 | ||
1204 | static struct genl_family dp_datapath_genl_family = { | |
1205 | .id = GENL_ID_GENERATE, | |
1206 | .hdrsize = sizeof(struct ovs_header), | |
1207 | .name = OVS_DATAPATH_FAMILY, | |
1208 | .version = OVS_DATAPATH_VERSION, | |
1209 | .maxattr = OVS_DP_ATTR_MAX, |
1210 | .netnsok = true | |
1211 | }; |
1212 | ||
1213 | static struct genl_multicast_group ovs_dp_datapath_multicast_group = { | |
1214 | .name = OVS_DATAPATH_MCGROUP | |
1215 | }; | |
1216 | ||
1217 | static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, | |
1218 | u32 portid, u32 seq, u32 flags, u8 cmd)
1219 | {
1220 | struct ovs_header *ovs_header; | |
1221 | struct ovs_dp_stats dp_stats; | |
1222 | int err; | |
1223 | ||
1224 | ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1225 | flags, cmd);
1226 | if (!ovs_header) | |
1227 | goto error; | |
1228 | ||
1229 | ovs_header->dp_ifindex = get_dpifindex(dp); | |
1230 | ||
1231 | rcu_read_lock(); | |
1232 | err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp)); | |
1233 | rcu_read_unlock(); | |
1234 | if (err) | |
1235 | goto nla_put_failure; | |
1236 | ||
1237 | get_dp_stats(dp, &dp_stats); | |
1238 | if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats)) |
1239 | goto nla_put_failure; | |
1240 | |
1241 | return genlmsg_end(skb, ovs_header); | |
1242 | ||
1243 | nla_put_failure: | |
1244 | genlmsg_cancel(skb, ovs_header); | |
1245 | error: | |
1246 | return -EMSGSIZE; | |
1247 | } | |
1248 | ||
1249 | static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
1250 | u32 seq, u8 cmd)
1251 | { | |
1252 | struct sk_buff *skb; | |
1253 | int retval; | |
1254 | ||
1255 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | |
1256 | if (!skb) | |
1257 | return ERR_PTR(-ENOMEM); | |
1258 | ||
1259 | retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
1260 | if (retval < 0) {
1261 | kfree_skb(skb); | |
1262 | return ERR_PTR(retval); | |
1263 | } | |
1264 | return skb; | |
1265 | } | |
1266 | ||
1267 | /* Called with genl_mutex and optionally with RTNL lock also. */ | |
1268 | static struct datapath *lookup_datapath(struct net *net, |
1269 | struct ovs_header *ovs_header, | |
1270 | struct nlattr *a[OVS_DP_ATTR_MAX + 1]) |
1271 | { | |
1272 | struct datapath *dp; | |
1273 | ||
1274 | if (!a[OVS_DP_ATTR_NAME]) | |
1275 | dp = get_dp(net, ovs_header->dp_ifindex);
1276 | else {
1277 | struct vport *vport; | |
1278 | ||
1279 | rcu_read_lock(); | |
1280 | vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1281 | dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1282 | rcu_read_unlock(); | |
1283 | } | |
1284 | return dp ? dp : ERR_PTR(-ENODEV); | |
1285 | } | |
1286 | ||
1287 | static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) | |
1288 | { | |
1289 | struct nlattr **a = info->attrs; | |
1290 | struct vport_parms parms; | |
1291 | struct sk_buff *reply; | |
1292 | struct datapath *dp; | |
1293 | struct vport *vport; | |
1294 | struct ovs_net *ovs_net;
1295 | int err, i;
1296 |
1297 | err = -EINVAL; | |
1298 | if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID]) | |
1299 | goto err; | |
1300 | ||
1301 | rtnl_lock(); | |
1302 | |
1303 | err = -ENOMEM; | |
1304 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); | |
1305 | if (dp == NULL) | |
1306 | goto err_unlock_rtnl; |
1307 | ||
1308 | ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1309 |
1310 | /* Allocate table. */ | |
1311 | err = -ENOMEM; | |
1312 | rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS)); | |
1313 | if (!dp->table) | |
1314 | goto err_free_dp; | |
1315 | ||
1316 | dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); | |
1317 | if (!dp->stats_percpu) { | |
1318 | err = -ENOMEM; | |
1319 | goto err_destroy_table; | |
1320 | } | |
1321 | ||
1322 | dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), |
1323 | GFP_KERNEL); | |
1324 | if (!dp->ports) { | |
1325 | err = -ENOMEM; | |
1326 | goto err_destroy_percpu; | |
1327 | } | |
1328 | ||
1329 | for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) | |
1330 | INIT_HLIST_HEAD(&dp->ports[i]); | |
1331 | ||
1332 | /* Set up our datapath device. */ |
1333 | parms.name = nla_data(a[OVS_DP_ATTR_NAME]); | |
1334 | parms.type = OVS_VPORT_TYPE_INTERNAL; | |
1335 | parms.options = NULL; | |
1336 | parms.dp = dp; | |
1337 | parms.port_no = OVSP_LOCAL; | |
1338 | parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
1339 |
1340 | vport = new_vport(&parms); | |
1341 | if (IS_ERR(vport)) { | |
1342 | err = PTR_ERR(vport); | |
1343 | if (err == -EBUSY) | |
1344 | err = -EEXIST; | |
1345 | ||
1346 | goto err_destroy_ports_array;
1347 | }
1348 | ||
1349 | reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1350 | info->snd_seq, OVS_DP_CMD_NEW);
1351 | err = PTR_ERR(reply); | |
1352 | if (IS_ERR(reply)) | |
1353 | goto err_destroy_local_port; | |
1354 | ||
1355 | ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id); |
1356 | list_add_tail(&dp->list_node, &ovs_net->dps); | |
1357 | rtnl_unlock(); |
1358 | ||
1359 | genl_notify(reply, genl_info_net(info), info->snd_portid,
1360 | ovs_dp_datapath_multicast_group.id, info->nlhdr,
1361 | GFP_KERNEL); | |
1362 | return 0; | |
1363 | ||
1364 | err_destroy_local_port: | |
1365 | ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL)); |
1366 | err_destroy_ports_array: | |
1367 | kfree(dp->ports); | |
1368 | err_destroy_percpu: |
1369 | free_percpu(dp->stats_percpu); | |
1370 | err_destroy_table: | |
1371 | ovs_flow_tbl_destroy(genl_dereference(dp->table)); | |
1372 | err_free_dp: | |
1373 | release_net(ovs_dp_get_net(dp));
1374 | kfree(dp);
1375 | err_unlock_rtnl:
1376 | rtnl_unlock(); | |
1377 | err: | |
1378 | return err; | |
1379 | } | |
1380 | ||
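/*
 * Datapath creation above: allocate the datapath, its flow table, per-CPU
 * stats and vport hash buckets, then add an internal vport (OVSP_LOCAL)
 * that gives the datapath its name and ifindex, and finally link it into
 * the per-namespace list and notify the datapath multicast group.
 */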
1381 | /* Called with genl_mutex. */ |
1382 | static void __dp_destroy(struct datapath *dp) | |
1383 | {
1384 | int i;
1385 |
1386 | rtnl_lock(); | |
1387 | |
1388 | for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { | |
1389 | struct vport *vport; | |
1390 | struct hlist_node *n;
1391 |
1392 | hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1393 | if (vport->port_no != OVSP_LOCAL)
1394 | ovs_dp_detach_port(vport); | |
1395 | } | |
1396 | |
1397 | list_del(&dp->list_node); | |
1398 | ovs_dp_detach_port(ovs_vport_rtnl(dp, OVSP_LOCAL));
1399 |
1400 | /* rtnl_unlock() will wait until all the references to devices that | |
1401 | * are pending unregistration have been dropped. We do it here to | |
1402 | * ensure that any internal devices (which contain DP pointers) are | |
1403 | * fully destroyed before freeing the datapath. | |
1404 | */ | |
1405 | rtnl_unlock(); | |
1406 | ||
1407 | call_rcu(&dp->rcu, destroy_dp_rcu); | |
1408 | } |
1409 | ||
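/*
 * Teardown order in __dp_destroy(): detach every non-local vport under
 * RTNL, drop the local port last, then rtnl_unlock() waits for pending
 * device unregistration before destroy_dp_rcu() frees the flow table,
 * stats and ports array after an RCU grace period.
 */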
1410 | static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) | |
1411 | { | |
1412 | struct sk_buff *reply; | |
1413 | struct datapath *dp; | |
1414 | int err; | |
1415 | ||
1416 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); | |
1417 | err = PTR_ERR(dp); | |
1418 | if (IS_ERR(dp)) | |
1419 | return err; | |
1420 | ||
1421 | reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1422 | info->snd_seq, OVS_DP_CMD_DEL);
1423 | err = PTR_ERR(reply); | |
1424 | if (IS_ERR(reply)) | |
1425 | return err; | |
1426 | ||
1427 | __dp_destroy(dp); | |
1428 |
1429 | genl_notify(reply, genl_info_net(info), info->snd_portid,
1430 | ovs_dp_datapath_multicast_group.id, info->nlhdr,
1431 | GFP_KERNEL); | |
1432 | ||
1433 | return 0; | |
1434 | } |
1435 | ||
1436 | static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) | |
1437 | { | |
1438 | struct sk_buff *reply; | |
1439 | struct datapath *dp; | |
1440 | int err; | |
1441 | ||
1442 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1443 | if (IS_ERR(dp))
1444 | return PTR_ERR(dp); | |
1445 | ||
1446 | reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1447 | info->snd_seq, OVS_DP_CMD_NEW);
1448 | if (IS_ERR(reply)) { | |
1449 | err = PTR_ERR(reply); | |
1450 | netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
1451 | ovs_dp_datapath_multicast_group.id, err);
1452 | return 0; | |
1453 | } | |
1454 | ||
1455 | genl_notify(reply, genl_info_net(info), info->snd_portid,
1456 | ovs_dp_datapath_multicast_group.id, info->nlhdr,
1457 | GFP_KERNEL); | |
1458 | ||
1459 | return 0; | |
1460 | } | |
1461 | ||
1462 | static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info) | |
1463 | { | |
1464 | struct sk_buff *reply; | |
1465 | struct datapath *dp; | |
1466 | ||
1467 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1468 | if (IS_ERR(dp))
1469 | return PTR_ERR(dp); | |
1470 | ||
1471 | reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
1472 | info->snd_seq, OVS_DP_CMD_NEW);
1473 | if (IS_ERR(reply)) | |
1474 | return PTR_ERR(reply); | |
1475 | ||
1476 | return genlmsg_reply(reply, info); | |
1477 | } | |
1478 | ||
1479 | static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |
1480 | { | |
1481 | struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1482 | struct datapath *dp;
1483 | int skip = cb->args[0]; | |
1484 | int i = 0; | |
1485 | ||
1486 | list_for_each_entry(dp, &ovs_net->dps, list_node) {
1487 | if (i >= skip &&
1488 | ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1489 | cb->nlh->nlmsg_seq, NLM_F_MULTI,
1490 | OVS_DP_CMD_NEW) < 0) | |
1491 | break; | |
1492 | i++; | |
1493 | } | |
1494 | ||
1495 | cb->args[0] = i; | |
1496 | ||
1497 | return skb->len; | |
1498 | } | |
1499 | ||
1500 | static struct genl_ops dp_datapath_genl_ops[] = { | |
1501 | { .cmd = OVS_DP_CMD_NEW, | |
1502 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1503 | .policy = datapath_policy, | |
1504 | .doit = ovs_dp_cmd_new | |
1505 | }, | |
1506 | { .cmd = OVS_DP_CMD_DEL, | |
1507 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1508 | .policy = datapath_policy, | |
1509 | .doit = ovs_dp_cmd_del | |
1510 | }, | |
1511 | { .cmd = OVS_DP_CMD_GET, | |
1512 | .flags = 0, /* OK for unprivileged users. */ | |
1513 | .policy = datapath_policy, | |
1514 | .doit = ovs_dp_cmd_get, | |
1515 | .dumpit = ovs_dp_cmd_dump | |
1516 | }, | |
1517 | { .cmd = OVS_DP_CMD_SET, | |
1518 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1519 | .policy = datapath_policy, | |
1520 | .doit = ovs_dp_cmd_set, | |
1521 | }, | |
1522 | }; | |
1523 | ||
1524 | static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { | |
1525 | [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, | |
1526 | [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, | |
1527 | [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 }, | |
1528 | [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 }, | |
1529 | [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 }, | |
1530 | [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED }, | |
1531 | }; | |
1532 | ||
1533 | static struct genl_family dp_vport_genl_family = { | |
1534 | .id = GENL_ID_GENERATE, | |
1535 | .hdrsize = sizeof(struct ovs_header), | |
1536 | .name = OVS_VPORT_FAMILY, | |
1537 | .version = OVS_VPORT_VERSION, | |
1538 | .maxattr = OVS_VPORT_ATTR_MAX, |
1539 | .netnsok = true | |
1540 | }; |
1541 | ||
1542 | struct genl_multicast_group ovs_dp_vport_multicast_group = { | |
1543 | .name = OVS_VPORT_MCGROUP | |
1544 | }; | |
1545 | ||
1546 | /* Called with RTNL lock or RCU read lock. */ | |
1547 | static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, | |
1548 | u32 portid, u32 seq, u32 flags, u8 cmd)
1549 | {
1550 | struct ovs_header *ovs_header; | |
1551 | struct ovs_vport_stats vport_stats; | |
1552 | int err; | |
1553 | ||
1554 | ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1555 | flags, cmd);
1556 | if (!ovs_header) | |
1557 | return -EMSGSIZE; | |
1558 | ||
1559 | ovs_header->dp_ifindex = get_dpifindex(vport->dp); | |
1560 | ||
1561 | if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) || |
1562 | nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) || | |
1563 | nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) || | |
1564 | nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
1565 | goto nla_put_failure;
1566 |
1567 | ovs_vport_get_stats(vport, &vport_stats); | |
1568 | if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), |
1569 | &vport_stats)) | |
1570 | goto nla_put_failure; | |
1571 | |
1572 | err = ovs_vport_get_options(vport, skb); | |
1573 | if (err == -EMSGSIZE) | |
1574 | goto error; | |
1575 | ||
1576 | return genlmsg_end(skb, ovs_header); | |
1577 | ||
1578 | nla_put_failure: | |
1579 | err = -EMSGSIZE; | |
1580 | error: | |
1581 | genlmsg_cancel(skb, ovs_header); | |
1582 | return err; | |
1583 | } | |
1584 | ||
1585 | /* Called with RTNL lock or RCU read lock. */ | |
1586 | struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1587 | u32 seq, u8 cmd)
1588 | { | |
1589 | struct sk_buff *skb; | |
1590 | int retval; | |
1591 | ||
1592 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | |
1593 | if (!skb) | |
1594 | return ERR_PTR(-ENOMEM); | |
1595 | ||
1596 | retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1597 | if (retval < 0) {
1598 | kfree_skb(skb); | |
1599 | return ERR_PTR(retval); | |
1600 | } | |
1601 | return skb; | |
1602 | } | |
1603 | ||
1604 | /* Called with RTNL lock or RCU read lock. */ | |
1605 | static struct vport *lookup_vport(struct net *net, |
1606 | struct ovs_header *ovs_header, | |
1607 | struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) |
1608 | { | |
1609 | struct datapath *dp; | |
1610 | struct vport *vport; | |
1611 | ||
1612 | if (a[OVS_VPORT_ATTR_NAME]) { | |
1613 | vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1614 | if (!vport)
1615 | return ERR_PTR(-ENODEV); | |
1616 | if (ovs_header->dp_ifindex && |
1617 | ovs_header->dp_ifindex != get_dpifindex(vport->dp)) | |
1618 | return ERR_PTR(-ENODEV); | |
ccb1352e JG |
1619 | return vport; |
1620 | } else if (a[OVS_VPORT_ATTR_PORT_NO]) { | |
1621 | u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); | |
1622 | ||
1623 | if (port_no >= DP_MAX_PORTS) | |
1624 | return ERR_PTR(-EFBIG); | |
1625 | ||
46df7b81 | 1626 | dp = get_dp(net, ovs_header->dp_ifindex); |
ccb1352e JG |
1627 | if (!dp) |
1628 | return ERR_PTR(-ENODEV); | |
1629 | ||
15eac2a7 | 1630 | vport = ovs_vport_rtnl_rcu(dp, port_no); |
ccb1352e | 1631 | if (!vport) |
14408dba | 1632 | return ERR_PTR(-ENODEV); |
ccb1352e JG |
1633 | return vport; |
1634 | } else | |
1635 | return ERR_PTR(-EINVAL); | |
1636 | } | |
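/*
 * lookup_vport() resolves the target of a vport request: a supplied
 * OVS_VPORT_ATTR_NAME takes precedence and is only cross-checked against
 * dp_ifindex when the caller filled it in, whereas a lookup by
 * OVS_VPORT_ATTR_PORT_NO always needs a valid dp_ifindex to find the
 * owning datapath first.
 */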
1637 | ||
1638 | static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) | |
1639 | { | |
1640 | struct nlattr **a = info->attrs; | |
1641 | struct ovs_header *ovs_header = info->userhdr; | |
1642 | struct vport_parms parms; | |
1643 | struct sk_buff *reply; | |
1644 | struct vport *vport; | |
1645 | struct datapath *dp; | |
1646 | u32 port_no; | |
1647 | int err; | |
1648 | ||
1649 | err = -EINVAL; | |
1650 | if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] || | |
1651 | !a[OVS_VPORT_ATTR_UPCALL_PID]) | |
1652 | goto exit; | |
1653 | ||
1654 | rtnl_lock(); | |
46df7b81 | 1655 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); |
ccb1352e JG |
1656 | err = -ENODEV; |
1657 | if (!dp) | |
1658 | goto exit_unlock; | |
1659 | ||
1660 | if (a[OVS_VPORT_ATTR_PORT_NO]) { | |
1661 | port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]); | |
1662 | ||
1663 | err = -EFBIG; | |
1664 | if (port_no >= DP_MAX_PORTS) | |
1665 | goto exit_unlock; | |
1666 | ||
15eac2a7 | 1667 | vport = ovs_vport_rtnl_rcu(dp, port_no); |
ccb1352e JG |
1668 | err = -EBUSY; |
1669 | if (vport) | |
1670 | goto exit_unlock; | |
1671 | } else { | |
1672 | for (port_no = 1; ; port_no++) { | |
1673 | if (port_no >= DP_MAX_PORTS) { | |
1674 | err = -EFBIG; | |
1675 | goto exit_unlock; | |
1676 | } | |
15eac2a7 | 1677 | vport = ovs_vport_rtnl(dp, port_no); |
ccb1352e JG |
1678 | if (!vport) |
1679 | break; | |
1680 | } | |
1681 | } | |
1682 | ||
1683 | parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]); | |
1684 | parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]); | |
1685 | parms.options = a[OVS_VPORT_ATTR_OPTIONS]; | |
1686 | parms.dp = dp; | |
1687 | parms.port_no = port_no; | |
15e47304 | 1688 | parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); |
ccb1352e JG |
1689 | |
1690 | vport = new_vport(&parms); | |
1691 | err = PTR_ERR(vport); | |
1692 | if (IS_ERR(vport)) | |
1693 | goto exit_unlock; | |
1694 | ||
cb7c5bdf | 1695 | err = 0; |
15e47304 | 1696 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, |
ccb1352e JG |
1697 | OVS_VPORT_CMD_NEW); |
1698 | if (IS_ERR(reply)) { | |
1699 | err = PTR_ERR(reply); | |
1700 | ovs_dp_detach_port(vport); | |
1701 | goto exit_unlock; | |
1702 | } | |
15e47304 | 1703 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
ccb1352e JG |
1704 | ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); |
1705 | ||
1706 | exit_unlock: | |
1707 | rtnl_unlock(); | |
1708 | exit: | |
1709 | return err; | |
1710 | } | |
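/*
 * Creating a vport requires OVS_VPORT_ATTR_NAME, _TYPE and _UPCALL_PID.
 * OVS_VPORT_ATTR_PORT_NO is optional: when it is absent the lowest free
 * port number starting at 1 is used (port 0 is the local port,
 * OVSP_LOCAL).  The OVS_VPORT_CMD_NEW notification is sent only once the
 * port has been attached successfully; if the reply cannot be built, the
 * new port is detached again so the failure leaves no state behind.
 */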
1711 | ||
1712 | static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) | |
1713 | { | |
1714 | struct nlattr **a = info->attrs; | |
1715 | struct sk_buff *reply; | |
1716 | struct vport *vport; | |
1717 | int err; | |
1718 | ||
1719 | rtnl_lock(); | |
46df7b81 | 1720 | vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); |
ccb1352e JG |
1721 | err = PTR_ERR(vport); |
1722 | if (IS_ERR(vport)) | |
1723 | goto exit_unlock; | |
1724 | ||
1725 | err = 0; | |
1726 | if (a[OVS_VPORT_ATTR_TYPE] && | |
1727 | nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) | |
1728 | err = -EINVAL; | |
1729 | ||
1730 | if (!err && a[OVS_VPORT_ATTR_OPTIONS]) | |
1731 | err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); | |
03fbf8b3 AA |
1732 | if (err) |
1733 | goto exit_unlock; | |
1734 | if (a[OVS_VPORT_ATTR_UPCALL_PID]) | |
15e47304 | 1735 | vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); |
ccb1352e | 1736 | |
15e47304 | 1737 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, |
ccb1352e JG |
1738 | OVS_VPORT_CMD_NEW); |
1739 | if (IS_ERR(reply)) { | |
46df7b81 | 1740 | netlink_set_err(sock_net(skb->sk)->genl_sock, 0, |
4cb6e116 AA |
1741 | ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); |
1742 | goto exit_unlock; | |
ccb1352e JG |
1743 | } |
1744 | ||
15e47304 | 1745 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
ccb1352e JG |
1746 | ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); |
1747 | ||
1748 | exit_unlock: | |
1749 | rtnl_unlock(); | |
1750 | return err; | |
1751 | } | |
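/*
 * ovs_vport_cmd_set() allows the options and upcall portid of an existing
 * vport to be changed but rejects any attempt to change its type.  If the
 * notification skb cannot be built after the change has been applied, the
 * error is reported to listeners with netlink_set_err() rather than being
 * rolled back.
 */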
1752 | ||
1753 | static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) | |
1754 | { | |
1755 | struct nlattr **a = info->attrs; | |
1756 | struct sk_buff *reply; | |
1757 | struct vport *vport; | |
1758 | int err; | |
1759 | ||
1760 | rtnl_lock(); | |
46df7b81 | 1761 | vport = lookup_vport(sock_net(skb->sk), info->userhdr, a); |
ccb1352e JG |
1762 | err = PTR_ERR(vport); |
1763 | if (IS_ERR(vport)) | |
1764 | goto exit_unlock; | |
1765 | ||
1766 | if (vport->port_no == OVSP_LOCAL) { | |
1767 | err = -EINVAL; | |
1768 | goto exit_unlock; | |
1769 | } | |
1770 | ||
15e47304 | 1771 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, |
ccb1352e JG |
1772 | OVS_VPORT_CMD_DEL); |
1773 | err = PTR_ERR(reply); | |
1774 | if (IS_ERR(reply)) | |
1775 | goto exit_unlock; | |
1776 | ||
734907e8 | 1777 | err = 0; |
ccb1352e JG |
1778 | ovs_dp_detach_port(vport); |
1779 | ||
15e47304 | 1780 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
ccb1352e JG |
1781 | ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); |
1782 | ||
1783 | exit_unlock: | |
1784 | rtnl_unlock(); | |
1785 | return err; | |
1786 | } | |
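/*
 * The local port (OVSP_LOCAL) cannot be deleted on its own; it only goes
 * away with its datapath.  The notification is built before the port is
 * detached so the multicast message still describes the vport that was
 * removed.
 */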
1787 | ||
1788 | static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info) | |
1789 | { | |
1790 | struct nlattr **a = info->attrs; | |
1791 | struct ovs_header *ovs_header = info->userhdr; | |
1792 | struct sk_buff *reply; | |
1793 | struct vport *vport; | |
1794 | int err; | |
1795 | ||
1796 | rcu_read_lock(); | |
46df7b81 | 1797 | vport = lookup_vport(sock_net(skb->sk), ovs_header, a); |
ccb1352e JG |
1798 | err = PTR_ERR(vport); |
1799 | if (IS_ERR(vport)) | |
1800 | goto exit_unlock; | |
1801 | ||
15e47304 | 1802 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, |
ccb1352e JG |
1803 | OVS_VPORT_CMD_NEW); |
1804 | err = PTR_ERR(reply); | |
1805 | if (IS_ERR(reply)) | |
1806 | goto exit_unlock; | |
1807 | ||
1808 | rcu_read_unlock(); | |
1809 | ||
1810 | return genlmsg_reply(reply, info); | |
1811 | ||
1812 | exit_unlock: | |
1813 | rcu_read_unlock(); | |
1814 | return err; | |
1815 | } | |
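/*
 * The read path takes only the RCU read lock, never RTNL, and
 * OVS_VPORT_CMD_GET is the one vport command open to unprivileged users
 * (see the .flags fields in dp_vport_genl_ops below).
 */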
1816 | ||
1817 | static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |
1818 | { | |
1819 | struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); | |
1820 | struct datapath *dp; | |
15eac2a7 PS |
1821 | int bucket = cb->args[0], skip = cb->args[1]; |
1822 | int i, j = 0; | |
ccb1352e | 1823 | |
46df7b81 | 1824 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); |
ccb1352e JG |
1825 | if (!dp) |
1826 | return -ENODEV; | |
1827 | ||
1828 | rcu_read_lock(); | |
15eac2a7 | 1829 | for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { |
ccb1352e | 1830 | struct vport *vport; |
15eac2a7 PS |
1831 | |
1832 | j = 0; | |
b67bfe0d | 1833 | hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) { |
15eac2a7 PS |
1834 | if (j >= skip && |
1835 | ovs_vport_cmd_fill_info(vport, skb, | |
15e47304 | 1836 | NETLINK_CB(cb->skb).portid, |
15eac2a7 PS |
1837 | cb->nlh->nlmsg_seq, |
1838 | NLM_F_MULTI, | |
1839 | OVS_VPORT_CMD_NEW) < 0) | |
1840 | goto out; | |
1841 | ||
1842 | j++; | |
1843 | } | |
1844 | skip = 0; | |
ccb1352e | 1845 | } |
15eac2a7 | 1846 | out: |
ccb1352e JG |
1847 | rcu_read_unlock(); |
1848 | ||
15eac2a7 PS |
1849 | cb->args[0] = i; |
1850 | cb->args[1] = j; | |
ccb1352e | 1851 | |
15eac2a7 | 1852 | return skb->len; |
ccb1352e JG |
1853 | } |
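/*
 * The dump is resumable: cb->args[0] remembers the vport hash bucket and
 * cb->args[1] the index within that bucket, so when the skb fills up the
 * next invocation continues exactly where this one stopped.
 */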
1854 | ||
ccb1352e JG |
1855 | static struct genl_ops dp_vport_genl_ops[] = { |
1856 | { .cmd = OVS_VPORT_CMD_NEW, | |
1857 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1858 | .policy = vport_policy, | |
1859 | .doit = ovs_vport_cmd_new | |
1860 | }, | |
1861 | { .cmd = OVS_VPORT_CMD_DEL, | |
1862 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1863 | .policy = vport_policy, | |
1864 | .doit = ovs_vport_cmd_del | |
1865 | }, | |
1866 | { .cmd = OVS_VPORT_CMD_GET, | |
1867 | .flags = 0, /* OK for unprivileged users. */ | |
1868 | .policy = vport_policy, | |
1869 | .doit = ovs_vport_cmd_get, | |
1870 | .dumpit = ovs_vport_cmd_dump | |
1871 | }, | |
1872 | { .cmd = OVS_VPORT_CMD_SET, | |
1873 | .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */ | |
1874 | .policy = vport_policy, | |
1875 | .doit = ovs_vport_cmd_set, | |
1876 | }, | |
1877 | }; | |
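/*
 * Illustrative userspace-side sketch (not part of this file): issuing an
 * OVS_VPORT_CMD_GET for a vport by name through this family, assuming
 * libnl-genl-3 is available.  The vport name "vport0" and the omitted
 * error handling are assumptions made purely for the example; the message
 * layout follows vport_policy and lookup_vport() above.
 */
#if 0	/* example only - build in userspace against libnl-genl-3 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static int example_vport_get(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct ovs_header *hdr;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);

	/* Generic netlink header followed by struct ovs_header. */
	hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
			  sizeof(*hdr), 0, OVS_VPORT_CMD_GET,
			  OVS_VPORT_VERSION);
	hdr->dp_ifindex = 0;	/* optional when looking up by name */
	nla_put_string(msg, OVS_VPORT_ATTR_NAME, "vport0");

	err = nl_send_auto(sk, msg);	/* kernel answers via genlmsg_reply() */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}
#endif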
1878 | ||
1879 | struct genl_family_and_ops { | |
1880 | struct genl_family *family; | |
1881 | struct genl_ops *ops; | |
1882 | int n_ops; | |
1883 | struct genl_multicast_group *group; | |
1884 | }; | |
1885 | ||
1886 | static const struct genl_family_and_ops dp_genl_families[] = { | |
1887 | { &dp_datapath_genl_family, | |
1888 | dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops), | |
1889 | &ovs_dp_datapath_multicast_group }, | |
1890 | { &dp_vport_genl_family, | |
1891 | dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops), | |
1892 | &ovs_dp_vport_multicast_group }, | |
1893 | { &dp_flow_genl_family, | |
1894 | dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops), | |
1895 | &ovs_dp_flow_multicast_group }, | |
1896 | { &dp_packet_genl_family, | |
1897 | dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops), | |
1898 | NULL }, | |
1899 | }; | |
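/*
 * This table drives both registration and teardown: dp_register_genl()
 * walks it forwards and dp_unregister_genl() drops whatever was already
 * registered.  The packet family is the only one without a multicast
 * group, hence the NULL entry.
 */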
1900 | ||
1901 | static void dp_unregister_genl(int n_families) | |
1902 | { | |
1903 | int i; | |
1904 | ||
1905 | for (i = 0; i < n_families; i++) | |
1906 | genl_unregister_family(dp_genl_families[i].family); | |
1907 | } | |
1908 | ||
1909 | static int dp_register_genl(void) | |
1910 | { | |
1911 | int n_registered; | |
1912 | int err; | |
1913 | int i; | |
1914 | ||
1915 | n_registered = 0; | |
1916 | for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) { | |
1917 | const struct genl_family_and_ops *f = &dp_genl_families[i]; | |
1918 | ||
1919 | err = genl_register_family_with_ops(f->family, f->ops, | |
1920 | f->n_ops); | |
1921 | if (err) | |
1922 | goto error; | |
1923 | n_registered++; | |
1924 | ||
1925 | if (f->group) { | |
1926 | err = genl_register_mc_group(f->family, f->group); | |
1927 | if (err) | |
1928 | goto error; | |
1929 | } | |
1930 | } | |
1931 | ||
1932 | return 0; | |
1933 | ||
1934 | error: | |
1935 | dp_unregister_genl(n_registered); | |
1936 | return err; | |
1937 | } | |
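/*
 * n_registered is bumped before the multicast group is added, so a
 * failure in genl_register_mc_group() still unregisters the family it
 * belongs to on the error path.
 */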
1938 | ||
46df7b81 PS |
1939 | static void rehash_flow_table(struct work_struct *work) |
1940 | { | |
1941 | struct datapath *dp; | |
1942 | struct net *net; | |
1943 | ||
1944 | genl_lock(); | |
1945 | rtnl_lock(); | |
1946 | for_each_net(net) { | |
1947 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); | |
1948 | ||
1949 | list_for_each_entry(dp, &ovs_net->dps, list_node) { | |
1950 | struct flow_table *old_table = genl_dereference(dp->table); | |
1951 | struct flow_table *new_table; | |
1952 | ||
1953 | new_table = ovs_flow_tbl_rehash(old_table); | |
1954 | if (!IS_ERR(new_table)) { | |
1955 | rcu_assign_pointer(dp->table, new_table); | |
1956 | ovs_flow_tbl_deferred_destroy(old_table); | |
1957 | } | |
1958 | } | |
1959 | } | |
1960 | rtnl_unlock(); | |
1961 | genl_unlock(); | |
1962 | ||
1963 | schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); | |
1964 | } | |
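/*
 * rehash_flow_table() is self-rearming delayed work: every
 * REHASH_FLOW_INTERVAL it rebuilds the flow table of each datapath in
 * every network namespace and frees the old table only after an RCU
 * grace period (ovs_flow_tbl_deferred_destroy()), so lookups in flight
 * keep a consistent view.  The work is first queued in dp_init() and
 * cancelled in dp_cleanup().
 */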
1965 | ||
1966 | static int __net_init ovs_init_net(struct net *net) | |
1967 | { | |
1968 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); | |
1969 | ||
1970 | INIT_LIST_HEAD(&ovs_net->dps); | |
1971 | return 0; | |
1972 | } | |
1973 | ||
1974 | static void __net_exit ovs_exit_net(struct net *net) | |
1975 | { | |
1976 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); | |
1977 | struct datapath *dp, *dp_next; | |
1978 | ||
1979 | genl_lock(); | |
1980 | list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) | |
1981 | __dp_destroy(dp); | |
1982 | genl_unlock(); | |
1983 | } | |
1984 | ||
1985 | static struct pernet_operations ovs_net_ops = { | |
1986 | .init = ovs_init_net, | |
1987 | .exit = ovs_exit_net, | |
1988 | .id = &ovs_net_id, | |
1989 | .size = sizeof(struct ovs_net), | |
1990 | }; | |
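/*
 * Per-namespace state is just the list of datapaths.  ovs_exit_net()
 * destroys any datapaths still alive when a namespace goes away, so
 * namespace teardown does not depend on userspace cleaning up first.
 */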
1991 | ||
ccb1352e JG |
1992 | static int __init dp_init(void) |
1993 | { | |
ccb1352e JG |
1994 | int err; |
1995 | ||
3523b29b | 1996 | BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); |
ccb1352e JG |
1997 | |
1998 | pr_info("Open vSwitch switching datapath\n"); | |
1999 | ||
2000 | err = ovs_flow_init(); | |
2001 | if (err) | |
2002 | goto error; | |
2003 | ||
2004 | err = ovs_vport_init(); | |
2005 | if (err) | |
2006 | goto error_flow_exit; | |
2007 | ||
46df7b81 | 2008 | err = register_pernet_device(&ovs_net_ops); |
ccb1352e JG |
2009 | if (err) |
2010 | goto error_vport_exit; | |
2011 | ||
46df7b81 PS |
2012 | err = register_netdevice_notifier(&ovs_dp_device_notifier); |
2013 | if (err) | |
2014 | goto error_netns_exit; | |
2015 | ||
ccb1352e JG |
2016 | err = dp_register_genl(); |
2017 | if (err < 0) | |
2018 | goto error_unreg_notifier; | |
2019 | ||
2020 | schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); | |
2021 | ||
2022 | return 0; | |
2023 | ||
2024 | error_unreg_notifier: | |
2025 | unregister_netdevice_notifier(&ovs_dp_device_notifier); | |
46df7b81 PS |
2026 | error_netns_exit: |
2027 | unregister_pernet_device(&ovs_net_ops); | |
ccb1352e JG |
2028 | error_vport_exit: |
2029 | ovs_vport_exit(); | |
2030 | error_flow_exit: | |
2031 | ovs_flow_exit(); | |
2032 | error: | |
2033 | return err; | |
2034 | } | |
2035 | ||
2036 | static void dp_cleanup(void) | |
2037 | { | |
2038 | cancel_delayed_work_sync(&rehash_flow_wq); | |
ccb1352e JG |
2039 | dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); |
2040 | unregister_netdevice_notifier(&ovs_dp_device_notifier); | |
46df7b81 PS |
2041 | unregister_pernet_device(&ovs_net_ops); |
2042 | rcu_barrier(); | |
ccb1352e JG |
2043 | ovs_vport_exit(); |
2044 | ovs_flow_exit(); | |
2045 | } | |
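/*
 * dp_cleanup() undoes dp_init() in reverse order.  The rcu_barrier()
 * call waits for outstanding RCU callbacks (such as deferred flow-table
 * destruction) to finish before the flow and vport layers are torn down.
 */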
2046 | ||
2047 | module_init(dp_init); | |
2048 | module_exit(dp_cleanup); | |
2049 | ||
2050 | MODULE_DESCRIPTION("Open vSwitch switching datapath"); | |
2051 | MODULE_LICENSE("GPL"); |