1 /*
2 * Copyright (c) 2007-2014 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "vport-internal_dev.h"
59 #include "vport-netdev.h"
60
61 int ovs_net_id __read_mostly;
62 EXPORT_SYMBOL_GPL(ovs_net_id);
63
64 static struct genl_family dp_packet_genl_family;
65 static struct genl_family dp_flow_genl_family;
66 static struct genl_family dp_datapath_genl_family;
67
68 static const struct nla_policy flow_policy[];
69
70 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
71 .name = OVS_FLOW_MCGROUP,
72 };
73
74 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
75 .name = OVS_DATAPATH_MCGROUP,
76 };
77
78 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
79 .name = OVS_VPORT_MCGROUP,
80 };
81
82 /* Check if we need to build a reply message.
83 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
84 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
85 unsigned int group)
86 {
87 return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
88 genl_has_listeners(family, genl_info_net(info), group);
89 }
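/* Editorial note (not part of the original file): for example, a request
 * carrying NLM_F_ECHO always gets a reply built, even with no multicast
 * listeners subscribed; without that flag the reply is only worth building
 * when genl_has_listeners() reports a listener for the group.
 */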
90
91 static void ovs_notify(struct genl_family *family,
92 struct sk_buff *skb, struct genl_info *info)
93 {
94 genl_notify(family, skb, info, 0, GFP_KERNEL);
95 }
96
97 /**
98 * DOC: Locking:
99 *
100 * All writes, e.g. writes to device state (add/remove datapath, port, set
101 * operations on vports, etc.) and writes to other state (flow table
102 * modifications, setting miscellaneous datapath parameters, etc.), are
103 * protected by ovs_lock.
104 *
105 * Reads are protected by RCU.
106 *
107 * There are a few special cases (mostly stats) that have their own
108 * synchronization, but they nest under all of the above and don't interact with
109 * each other.
110 *
111 * The RTNL lock nests inside ovs_mutex.
112 */
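/* Editorial sketch (not part of the original file): the typical pattern,
 * using helpers defined in this file.
 *
 *	ovs_lock();                                  // writer side
 *	err = ovs_flow_tbl_insert(&dp->table, flow, &mask);
 *	ovs_unlock();
 *
 *	rcu_read_lock();                             // reader side
 *	dp = get_dp_rcu(net, dp_ifindex);            // dp valid until unlock
 *	rcu_read_unlock();
 */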
113
114 static DEFINE_MUTEX(ovs_mutex);
115
116 void ovs_lock(void)
117 {
118 mutex_lock(&ovs_mutex);
119 }
120
121 void ovs_unlock(void)
122 {
123 mutex_unlock(&ovs_mutex);
124 }
125
126 #ifdef CONFIG_LOCKDEP
127 int lockdep_ovsl_is_held(void)
128 {
129 if (debug_locks)
130 return lockdep_is_held(&ovs_mutex);
131 else
132 return 1;
133 }
134 EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
135 #endif
136
137 static struct vport *new_vport(const struct vport_parms *);
138 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
139 const struct sw_flow_key *,
140 const struct dp_upcall_info *);
141 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
142 const struct sw_flow_key *,
143 const struct dp_upcall_info *);
144
145 /* Must be called with rcu_read_lock. */
146 static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
147 {
148 struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
149
150 if (dev) {
151 struct vport *vport = ovs_internal_dev_get_vport(dev);
152 if (vport)
153 return vport->dp;
154 }
155
156 return NULL;
157 }
158
159 /* The caller must hold either ovs_mutex or rcu_read_lock to keep the
160 * returned dp pointer valid.
161 */
162 static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
163 {
164 struct datapath *dp;
165
166 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
167 rcu_read_lock();
168 dp = get_dp_rcu(net, dp_ifindex);
169 rcu_read_unlock();
170
171 return dp;
172 }
173
174 /* Must be called with rcu_read_lock or ovs_mutex. */
175 const char *ovs_dp_name(const struct datapath *dp)
176 {
177 struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
178 return ovs_vport_name(vport);
179 }
180
181 static int get_dpifindex(const struct datapath *dp)
182 {
183 struct vport *local;
184 int ifindex;
185
186 rcu_read_lock();
187
188 local = ovs_vport_rcu(dp, OVSP_LOCAL);
189 if (local)
190 ifindex = local->dev->ifindex;
191 else
192 ifindex = 0;
193
194 rcu_read_unlock();
195
196 return ifindex;
197 }
198
199 static void destroy_dp_rcu(struct rcu_head *rcu)
200 {
201 struct datapath *dp = container_of(rcu, struct datapath, rcu);
202
203 ovs_flow_tbl_destroy(&dp->table);
204 free_percpu(dp->stats_percpu);
205 kfree(dp->ports);
206 kfree(dp);
207 }
208
209 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
210 u16 port_no)
211 {
212 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
213 }
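/* Editorial example: assuming DP_VPORT_HASH_BUCKETS is a power of two
 * (1024 in datapath.h at the time of writing), the AND is a cheap modulo:
 * port_no 1025 lands in bucket 1025 & 1023 == 1.
 */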
214
215 /* Called with ovs_mutex or RCU read lock. */
216 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
217 {
218 struct vport *vport;
219 struct hlist_head *head;
220
221 head = vport_hash_bucket(dp, port_no);
222 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
223 if (vport->port_no == port_no)
224 return vport;
225 }
226 return NULL;
227 }
228
229 /* Called with ovs_mutex. */
230 static struct vport *new_vport(const struct vport_parms *parms)
231 {
232 struct vport *vport;
233
234 vport = ovs_vport_add(parms);
235 if (!IS_ERR(vport)) {
236 struct datapath *dp = parms->dp;
237 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
238
239 hlist_add_head_rcu(&vport->dp_hash_node, head);
240 }
241 return vport;
242 }
243
244 void ovs_dp_detach_port(struct vport *p)
245 {
246 ASSERT_OVSL();
247
248 /* First drop references to device. */
249 hlist_del_rcu(&p->dp_hash_node);
250
251 /* Then destroy it. */
252 ovs_vport_del(p);
253 }
254
255 /* Must be called with rcu_read_lock. */
256 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
257 {
258 const struct vport *p = OVS_CB(skb)->input_vport;
259 struct datapath *dp = p->dp;
260 struct sw_flow *flow;
261 struct sw_flow_actions *sf_acts;
262 struct dp_stats_percpu *stats;
263 u64 *stats_counter;
264 u32 n_mask_hit;
265
266 stats = this_cpu_ptr(dp->stats_percpu);
267
268 /* Look up flow. */
269 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
270 if (unlikely(!flow)) {
271 struct dp_upcall_info upcall;
272 int error;
273
274 memset(&upcall, 0, sizeof(upcall));
275 upcall.cmd = OVS_PACKET_CMD_MISS;
276 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
277 upcall.mru = OVS_CB(skb)->mru;
278 error = ovs_dp_upcall(dp, skb, key, &upcall);
279 if (unlikely(error))
280 kfree_skb(skb);
281 else
282 consume_skb(skb);
283 stats_counter = &stats->n_missed;
284 goto out;
285 }
286
287 ovs_flow_stats_update(flow, key->tp.flags, skb);
288 sf_acts = rcu_dereference(flow->sf_acts);
289 ovs_execute_actions(dp, skb, sf_acts, key);
290
291 stats_counter = &stats->n_hit;
292
293 out:
294 /* Update datapath statistics. */
295 u64_stats_update_begin(&stats->syncp);
296 (*stats_counter)++;
297 stats->n_mask_hit += n_mask_hit;
298 u64_stats_update_end(&stats->syncp);
299 }
300
301 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
302 const struct sw_flow_key *key,
303 const struct dp_upcall_info *upcall_info)
304 {
305 struct dp_stats_percpu *stats;
306 int err;
307
308 if (upcall_info->portid == 0) {
309 err = -ENOTCONN;
310 goto err;
311 }
312
313 if (!skb_is_gso(skb))
314 err = queue_userspace_packet(dp, skb, key, upcall_info);
315 else
316 err = queue_gso_packets(dp, skb, key, upcall_info);
317 if (err)
318 goto err;
319
320 return 0;
321
322 err:
323 stats = this_cpu_ptr(dp->stats_percpu);
324
325 u64_stats_update_begin(&stats->syncp);
326 stats->n_lost++;
327 u64_stats_update_end(&stats->syncp);
328
329 return err;
330 }
331
332 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
333 const struct sw_flow_key *key,
334 const struct dp_upcall_info *upcall_info)
335 {
336 unsigned short gso_type = skb_shinfo(skb)->gso_type;
337 struct sw_flow_key later_key;
338 struct sk_buff *segs, *nskb;
339 int err;
340
341 BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
342 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
343 if (IS_ERR(segs))
344 return PTR_ERR(segs);
345 if (segs == NULL)
346 return -EINVAL;
347
348 if (gso_type & SKB_GSO_UDP) {
349 /* The initial flow key extracted by ovs_flow_key_extract()
350 * in this case is for the first fragment, so we need to
351 * properly mark later fragments.
352 */
353 later_key = *key;
354 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
355 }
356
357 /* Queue all of the segments. */
358 skb = segs;
359 do {
360 if (gso_type & SKB_GSO_UDP && skb != segs)
361 key = &later_key;
362
363 err = queue_userspace_packet(dp, skb, key, upcall_info);
364 if (err)
365 break;
366
367 } while ((skb = skb->next));
368
369 /* Free all of the segments. */
370 skb = segs;
371 do {
372 nskb = skb->next;
373 if (err)
374 kfree_skb(skb);
375 else
376 consume_skb(skb);
377 } while ((skb = nskb));
378 return err;
379 }
380
381 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
382 unsigned int hdrlen)
383 {
384 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
385 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
386 + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
387
388 /* OVS_PACKET_ATTR_USERDATA */
389 if (upcall_info->userdata)
390 size += NLA_ALIGN(upcall_info->userdata->nla_len);
391
392 /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
393 if (upcall_info->egress_tun_info)
394 size += nla_total_size(ovs_tun_key_attr_size());
395
396 /* OVS_PACKET_ATTR_ACTIONS */
397 if (upcall_info->actions_len)
398 size += nla_total_size(upcall_info->actions_len);
399
400 /* OVS_PACKET_ATTR_MRU */
401 if (upcall_info->mru)
402 size += nla_total_size(sizeof(upcall_info->mru));
403
404 return size;
405 }
406
407 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
408 {
409 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
410 size_t plen = NLA_ALIGN(skb->len) - skb->len;
411
412 if (plen > 0)
413 memset(skb_put(skb, plen), 0, plen);
414 }
415 }
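/* Editorial example: NLA_ALIGNTO is 4, so an skb of len 14 gets
 * plen == NLA_ALIGN(14) - 14 == 2 zero bytes appended, while an already
 * aligned len such as 16 yields plen == 0 and no padding.
 */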
416
417 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
418 const struct sw_flow_key *key,
419 const struct dp_upcall_info *upcall_info)
420 {
421 struct ovs_header *upcall;
422 struct sk_buff *nskb = NULL;
423 struct sk_buff *user_skb = NULL; /* to be queued to userspace */
424 struct nlattr *nla;
425 size_t len;
426 unsigned int hlen;
427 int err, dp_ifindex;
428
429 dp_ifindex = get_dpifindex(dp);
430 if (!dp_ifindex)
431 return -ENODEV;
432
433 if (skb_vlan_tag_present(skb)) {
434 nskb = skb_clone(skb, GFP_ATOMIC);
435 if (!nskb)
436 return -ENOMEM;
437
438 nskb = __vlan_hwaccel_push_inside(nskb);
439 if (!nskb)
440 return -ENOMEM;
441
442 skb = nskb;
443 }
444
445 if (nla_attr_size(skb->len) > USHRT_MAX) {
446 err = -EFBIG;
447 goto out;
448 }
449
450 /* Complete checksum if needed */
451 if (skb->ip_summed == CHECKSUM_PARTIAL &&
452 (err = skb_checksum_help(skb)))
453 goto out;
454
455 /* Older versions of OVS user space enforce alignment of the last
456 * Netlink attribute to NLA_ALIGNTO, which would require extensive
457 * padding logic. Only perform zerocopy if padding is not required.
458 */
459 if (dp->user_features & OVS_DP_F_UNALIGNED)
460 hlen = skb_zerocopy_headlen(skb);
461 else
462 hlen = skb->len;
463
464 len = upcall_msg_size(upcall_info, hlen);
465 user_skb = genlmsg_new(len, GFP_ATOMIC);
466 if (!user_skb) {
467 err = -ENOMEM;
468 goto out;
469 }
470
471 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
472 0, upcall_info->cmd);
473 upcall->dp_ifindex = dp_ifindex;
474
475 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
476 BUG_ON(err);
477
478 if (upcall_info->userdata)
479 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
480 nla_len(upcall_info->userdata),
481 nla_data(upcall_info->userdata));
482
483 if (upcall_info->egress_tun_info) {
484 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
485 err = ovs_nla_put_tunnel_info(user_skb,
486 upcall_info->egress_tun_info);
487 BUG_ON(err);
488 nla_nest_end(user_skb, nla);
489 }
490
491 if (upcall_info->actions_len) {
492 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
493 err = ovs_nla_put_actions(upcall_info->actions,
494 upcall_info->actions_len,
495 user_skb);
496 if (!err)
497 nla_nest_end(user_skb, nla);
498 else
499 nla_nest_cancel(user_skb, nla);
500 }
501
502 /* Add OVS_PACKET_ATTR_MRU */
503 if (upcall_info->mru) {
504 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
505 upcall_info->mru)) {
506 err = -ENOBUFS;
507 goto out;
508 }
509 pad_packet(dp, user_skb);
510 }
511
512 /* Only reserve room for the attribute header; packet data is added
513 * in skb_zerocopy(). */
514 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
515 err = -ENOBUFS;
516 goto out;
517 }
518 nla->nla_len = nla_attr_size(skb->len);
519
520 err = skb_zerocopy(user_skb, skb, skb->len, hlen);
521 if (err)
522 goto out;
523
524 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
525 pad_packet(dp, user_skb);
526
527 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
528
529 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
530 user_skb = NULL;
531 out:
532 if (err)
533 skb_tx_error(skb);
534 kfree_skb(user_skb);
535 kfree_skb(nskb);
536 return err;
537 }
538
539 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
540 {
541 struct ovs_header *ovs_header = info->userhdr;
542 struct net *net = sock_net(skb->sk);
543 struct nlattr **a = info->attrs;
544 struct sw_flow_actions *acts;
545 struct sk_buff *packet;
546 struct sw_flow *flow;
547 struct sw_flow_actions *sf_acts;
548 struct datapath *dp;
549 struct ethhdr *eth;
550 struct vport *input_vport;
551 u16 mru = 0;
552 int len;
553 int err;
554 bool log = !a[OVS_PACKET_ATTR_PROBE];
555
556 err = -EINVAL;
557 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
558 !a[OVS_PACKET_ATTR_ACTIONS])
559 goto err;
560
561 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
562 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
563 err = -ENOMEM;
564 if (!packet)
565 goto err;
566 skb_reserve(packet, NET_IP_ALIGN);
567
568 nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
569
570 skb_reset_mac_header(packet);
571 eth = eth_hdr(packet);
572
573 /* Normally, setting the skb 'protocol' field would be handled by a
574 * call to eth_type_trans(), but it assumes there's a sending
575 * device, which we may not have. */
576 if (eth_proto_is_802_3(eth->h_proto))
577 packet->protocol = eth->h_proto;
578 else
579 packet->protocol = htons(ETH_P_802_2);
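/* Editorial example: an Ethertype such as ETH_P_IP (0x0800) is at or above
 * ETH_P_802_3_MIN (0x0600) and is kept as-is, while a raw 802.3 length
 * field (a value below 0x0600) falls back to ETH_P_802_2.
 */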
580
581 /* Set packet's mru */
582 if (a[OVS_PACKET_ATTR_MRU]) {
583 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
584 packet->ignore_df = 1;
585 }
586 OVS_CB(packet)->mru = mru;
587
588 /* Build an sw_flow for sending this packet. */
589 flow = ovs_flow_alloc();
590 err = PTR_ERR(flow);
591 if (IS_ERR(flow))
592 goto err_kfree_skb;
593
594 err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
595 packet, &flow->key, log);
596 if (err)
597 goto err_flow_free;
598
599 err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
600 &flow->key, &acts, log);
601 if (err)
602 goto err_flow_free;
603
604 rcu_assign_pointer(flow->sf_acts, acts);
605 packet->priority = flow->key.phy.priority;
606 packet->mark = flow->key.phy.skb_mark;
607
608 rcu_read_lock();
609 dp = get_dp_rcu(net, ovs_header->dp_ifindex);
610 err = -ENODEV;
611 if (!dp)
612 goto err_unlock;
613
614 input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
615 if (!input_vport)
616 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
617
618 if (!input_vport)
619 goto err_unlock;
620
621 packet->dev = input_vport->dev;
622 OVS_CB(packet)->input_vport = input_vport;
623 sf_acts = rcu_dereference(flow->sf_acts);
624
625 local_bh_disable();
626 err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
627 local_bh_enable();
628 rcu_read_unlock();
629
630 ovs_flow_free(flow, false);
631 return err;
632
633 err_unlock:
634 rcu_read_unlock();
635 err_flow_free:
636 ovs_flow_free(flow, false);
637 err_kfree_skb:
638 kfree_skb(packet);
639 err:
640 return err;
641 }
642
643 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
644 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
645 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
646 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
647 [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
648 [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
649 };
650
651 static const struct genl_ops dp_packet_genl_ops[] = {
652 { .cmd = OVS_PACKET_CMD_EXECUTE,
653 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
654 .policy = packet_policy,
655 .doit = ovs_packet_cmd_execute
656 }
657 };
658
659 static struct genl_family dp_packet_genl_family = {
660 .id = GENL_ID_GENERATE,
661 .hdrsize = sizeof(struct ovs_header),
662 .name = OVS_PACKET_FAMILY,
663 .version = OVS_PACKET_VERSION,
664 .maxattr = OVS_PACKET_ATTR_MAX,
665 .netnsok = true,
666 .parallel_ops = true,
667 .ops = dp_packet_genl_ops,
668 .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
669 };
670
671 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
672 struct ovs_dp_megaflow_stats *mega_stats)
673 {
674 int i;
675
676 memset(mega_stats, 0, sizeof(*mega_stats));
677
678 stats->n_flows = ovs_flow_tbl_count(&dp->table);
679 mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
680
681 stats->n_hit = stats->n_missed = stats->n_lost = 0;
682
683 for_each_possible_cpu(i) {
684 const struct dp_stats_percpu *percpu_stats;
685 struct dp_stats_percpu local_stats;
686 unsigned int start;
687
688 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
689
690 do {
691 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
692 local_stats = *percpu_stats;
693 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
694
695 stats->n_hit += local_stats.n_hit;
696 stats->n_missed += local_stats.n_missed;
697 stats->n_lost += local_stats.n_lost;
698 mega_stats->n_mask_hit += local_stats.n_mask_hit;
699 }
700 }
701
702 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
703 {
704 return ovs_identifier_is_ufid(sfid) &&
705 !(ufid_flags & OVS_UFID_F_OMIT_KEY);
706 }
707
708 static bool should_fill_mask(uint32_t ufid_flags)
709 {
710 return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
711 }
712
713 static bool should_fill_actions(uint32_t ufid_flags)
714 {
715 return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
716 }
717
718 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
719 const struct sw_flow_id *sfid,
720 uint32_t ufid_flags)
721 {
722 size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
723
724 /* OVS_FLOW_ATTR_UFID */
725 if (sfid && ovs_identifier_is_ufid(sfid))
726 len += nla_total_size(sfid->ufid_len);
727
728 /* OVS_FLOW_ATTR_KEY */
729 if (!sfid || should_fill_key(sfid, ufid_flags))
730 len += nla_total_size(ovs_key_attr_size());
731
732 /* OVS_FLOW_ATTR_MASK */
733 if (should_fill_mask(ufid_flags))
734 len += nla_total_size(ovs_key_attr_size());
735
736 /* OVS_FLOW_ATTR_ACTIONS */
737 if (should_fill_actions(ufid_flags))
738 len += nla_total_size(acts->orig_len);
739
740 return len
741 + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
742 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
743 + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
744 }
745
746 /* Called with ovs_mutex or RCU read lock. */
747 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
748 struct sk_buff *skb)
749 {
750 struct ovs_flow_stats stats;
751 __be16 tcp_flags;
752 unsigned long used;
753
754 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
755
756 if (used &&
757 nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
758 OVS_FLOW_ATTR_PAD))
759 return -EMSGSIZE;
760
761 if (stats.n_packets &&
762 nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
763 sizeof(struct ovs_flow_stats), &stats,
764 OVS_FLOW_ATTR_PAD))
765 return -EMSGSIZE;
766
767 if ((u8)ntohs(tcp_flags) &&
768 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
769 return -EMSGSIZE;
770
771 return 0;
772 }
773
774 /* Called with ovs_mutex or RCU read lock. */
775 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
776 struct sk_buff *skb, int skb_orig_len)
777 {
778 struct nlattr *start;
779 int err;
780
781 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
782 * this is the first flow to be dumped into 'skb'. This is unusual for
783 * Netlink but individual action lists can be longer than
784 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
785 * The userspace caller can always fetch the actions separately if it
786 * really wants them. (Most userspace callers in fact don't care.)
787 *
788 * This can only fail for dump operations because the skb is always
789 * properly sized for single flows.
790 */
791 start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
792 if (start) {
793 const struct sw_flow_actions *sf_acts;
794
795 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
796 err = ovs_nla_put_actions(sf_acts->actions,
797 sf_acts->actions_len, skb);
798
799 if (!err)
800 nla_nest_end(skb, start);
801 else {
802 if (skb_orig_len)
803 return err;
804
805 nla_nest_cancel(skb, start);
806 }
807 } else if (skb_orig_len) {
808 return -EMSGSIZE;
809 }
810
811 return 0;
812 }
813
814 /* Called with ovs_mutex or RCU read lock. */
815 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
816 struct sk_buff *skb, u32 portid,
817 u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
818 {
819 const int skb_orig_len = skb->len;
820 struct ovs_header *ovs_header;
821 int err;
822
823 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
824 flags, cmd);
825 if (!ovs_header)
826 return -EMSGSIZE;
827
828 ovs_header->dp_ifindex = dp_ifindex;
829
830 err = ovs_nla_put_identifier(flow, skb);
831 if (err)
832 goto error;
833
834 if (should_fill_key(&flow->id, ufid_flags)) {
835 err = ovs_nla_put_masked_key(flow, skb);
836 if (err)
837 goto error;
838 }
839
840 if (should_fill_mask(ufid_flags)) {
841 err = ovs_nla_put_mask(flow, skb);
842 if (err)
843 goto error;
844 }
845
846 err = ovs_flow_cmd_fill_stats(flow, skb);
847 if (err)
848 goto error;
849
850 if (should_fill_actions(ufid_flags)) {
851 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
852 if (err)
853 goto error;
854 }
855
856 genlmsg_end(skb, ovs_header);
857 return 0;
858
859 error:
860 genlmsg_cancel(skb, ovs_header);
861 return err;
862 }
863
864 /* May not be called with RCU read lock. */
865 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
866 const struct sw_flow_id *sfid,
867 struct genl_info *info,
868 bool always,
869 uint32_t ufid_flags)
870 {
871 struct sk_buff *skb;
872 size_t len;
873
874 if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
875 return NULL;
876
877 len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
878 skb = genlmsg_new(len, GFP_KERNEL);
879 if (!skb)
880 return ERR_PTR(-ENOMEM);
881
882 return skb;
883 }
884
885 /* Called with ovs_mutex. */
886 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
887 int dp_ifindex,
888 struct genl_info *info, u8 cmd,
889 bool always, u32 ufid_flags)
890 {
891 struct sk_buff *skb;
892 int retval;
893
894 skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
895 &flow->id, info, always, ufid_flags);
896 if (IS_ERR_OR_NULL(skb))
897 return skb;
898
899 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
900 info->snd_portid, info->snd_seq, 0,
901 cmd, ufid_flags);
902 BUG_ON(retval < 0);
903 return skb;
904 }
905
906 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
907 {
908 struct net *net = sock_net(skb->sk);
909 struct nlattr **a = info->attrs;
910 struct ovs_header *ovs_header = info->userhdr;
911 struct sw_flow *flow = NULL, *new_flow;
912 struct sw_flow_mask mask;
913 struct sk_buff *reply;
914 struct datapath *dp;
915 struct sw_flow_key key;
916 struct sw_flow_actions *acts;
917 struct sw_flow_match match;
918 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
919 int error;
920 bool log = !a[OVS_FLOW_ATTR_PROBE];
921
922 /* Must have key and actions. */
923 error = -EINVAL;
924 if (!a[OVS_FLOW_ATTR_KEY]) {
925 OVS_NLERR(log, "Flow key attr not present in new flow.");
926 goto error;
927 }
928 if (!a[OVS_FLOW_ATTR_ACTIONS]) {
929 OVS_NLERR(log, "Flow actions attr not present in new flow.");
930 goto error;
931 }
932
933 /* Most of the time we need to allocate a new flow, so do it before
934 * taking the lock.
935 */
936 new_flow = ovs_flow_alloc();
937 if (IS_ERR(new_flow)) {
938 error = PTR_ERR(new_flow);
939 goto error;
940 }
941
942 /* Extract key. */
943 ovs_match_init(&match, &key, &mask);
944 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
945 a[OVS_FLOW_ATTR_MASK], log);
946 if (error)
947 goto err_kfree_flow;
948
949 ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
950
951 /* Extract flow identifier. */
952 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
953 &key, log);
954 if (error)
955 goto err_kfree_flow;
956
957 /* Validate actions. */
958 error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
959 &new_flow->key, &acts, log);
960 if (error) {
961 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
962 goto err_kfree_flow;
963 }
964
965 reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
966 ufid_flags);
967 if (IS_ERR(reply)) {
968 error = PTR_ERR(reply);
969 goto err_kfree_acts;
970 }
971
972 ovs_lock();
973 dp = get_dp(net, ovs_header->dp_ifindex);
974 if (unlikely(!dp)) {
975 error = -ENODEV;
976 goto err_unlock_ovs;
977 }
978
979 /* Check if this is a duplicate flow */
980 if (ovs_identifier_is_ufid(&new_flow->id))
981 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
982 if (!flow)
983 flow = ovs_flow_tbl_lookup(&dp->table, &key);
984 if (likely(!flow)) {
985 rcu_assign_pointer(new_flow->sf_acts, acts);
986
987 /* Put flow in bucket. */
988 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
989 if (unlikely(error)) {
990 acts = NULL;
991 goto err_unlock_ovs;
992 }
993
994 if (unlikely(reply)) {
995 error = ovs_flow_cmd_fill_info(new_flow,
996 ovs_header->dp_ifindex,
997 reply, info->snd_portid,
998 info->snd_seq, 0,
999 OVS_FLOW_CMD_NEW,
1000 ufid_flags);
1001 BUG_ON(error < 0);
1002 }
1003 ovs_unlock();
1004 } else {
1005 struct sw_flow_actions *old_acts;
1006
1007 /* Bail out if we're not allowed to modify an existing flow.
1008 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1009 * because Generic Netlink treats the latter as a dump
1010 * request. We also accept NLM_F_EXCL in case that bug ever
1011 * gets fixed.
1012 */
1013 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1014 | NLM_F_EXCL))) {
1015 error = -EEXIST;
1016 goto err_unlock_ovs;
1017 }
1018 /* The flow identifier has to be the same for flow updates.
1019 * Look for any overlapping flow.
1020 */
1021 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1022 if (ovs_identifier_is_key(&flow->id))
1023 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1024 &match);
1025 else /* UFID matches but key is different */
1026 flow = NULL;
1027 if (!flow) {
1028 error = -ENOENT;
1029 goto err_unlock_ovs;
1030 }
1031 }
1032 /* Update actions. */
1033 old_acts = ovsl_dereference(flow->sf_acts);
1034 rcu_assign_pointer(flow->sf_acts, acts);
1035
1036 if (unlikely(reply)) {
1037 error = ovs_flow_cmd_fill_info(flow,
1038 ovs_header->dp_ifindex,
1039 reply, info->snd_portid,
1040 info->snd_seq, 0,
1041 OVS_FLOW_CMD_NEW,
1042 ufid_flags);
1043 BUG_ON(error < 0);
1044 }
1045 ovs_unlock();
1046
1047 ovs_nla_free_flow_actions_rcu(old_acts);
1048 ovs_flow_free(new_flow, false);
1049 }
1050
1051 if (reply)
1052 ovs_notify(&dp_flow_genl_family, reply, info);
1053 return 0;
1054
1055 err_unlock_ovs:
1056 ovs_unlock();
1057 kfree_skb(reply);
1058 err_kfree_acts:
1059 ovs_nla_free_flow_actions(acts);
1060 err_kfree_flow:
1061 ovs_flow_free(new_flow, false);
1062 error:
1063 return error;
1064 }
1065
1066 /* Factor out action copy to avoid a "-Wframe-larger-than=1024" warning. */
1067 static struct sw_flow_actions *get_flow_actions(struct net *net,
1068 const struct nlattr *a,
1069 const struct sw_flow_key *key,
1070 const struct sw_flow_mask *mask,
1071 bool log)
1072 {
1073 struct sw_flow_actions *acts;
1074 struct sw_flow_key masked_key;
1075 int error;
1076
1077 ovs_flow_mask_key(&masked_key, key, true, mask);
1078 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1079 if (error) {
1080 OVS_NLERR(log,
1081 "Actions may not be safe on all matching packets");
1082 return ERR_PTR(error);
1083 }
1084
1085 return acts;
1086 }
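/* Editorial example: masking before validation checks the actions against
 * the bits the flow can actually match; e.g. an IPv4 src of 10.0.0.5 under
 * a /24 mask is validated as the masked value 10.0.0.0.
 */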
1087
1088 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1089 {
1090 struct net *net = sock_net(skb->sk);
1091 struct nlattr **a = info->attrs;
1092 struct ovs_header *ovs_header = info->userhdr;
1093 struct sw_flow_key key;
1094 struct sw_flow *flow;
1095 struct sw_flow_mask mask;
1096 struct sk_buff *reply = NULL;
1097 struct datapath *dp;
1098 struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1099 struct sw_flow_match match;
1100 struct sw_flow_id sfid;
1101 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1102 int error = 0;
1103 bool log = !a[OVS_FLOW_ATTR_PROBE];
1104 bool ufid_present;
1105
1106 ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1107 if (a[OVS_FLOW_ATTR_KEY]) {
1108 ovs_match_init(&match, &key, &mask);
1109 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1110 a[OVS_FLOW_ATTR_MASK], log);
1111 } else if (!ufid_present) {
1112 OVS_NLERR(log,
1113 "Flow set message rejected, Key attribute missing.");
1114 error = -EINVAL;
1115 }
1116 if (error)
1117 goto error;
1118
1119 /* Validate actions. */
1120 if (a[OVS_FLOW_ATTR_ACTIONS]) {
1121 if (!a[OVS_FLOW_ATTR_KEY]) {
1122 OVS_NLERR(log,
1123 "Flow key attribute not present in set flow.");
1124 error = -EINVAL;
1125 goto error;
1126 }
1127
1128 acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
1129 &mask, log);
1130 if (IS_ERR(acts)) {
1131 error = PTR_ERR(acts);
1132 goto error;
1133 }
1134
1135 /* We can allocate the reply before locking if we have acts. */
1136 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1137 ufid_flags);
1138 if (IS_ERR(reply)) {
1139 error = PTR_ERR(reply);
1140 goto err_kfree_acts;
1141 }
1142 }
1143
1144 ovs_lock();
1145 dp = get_dp(net, ovs_header->dp_ifindex);
1146 if (unlikely(!dp)) {
1147 error = -ENODEV;
1148 goto err_unlock_ovs;
1149 }
1150 /* Check that the flow exists. */
1151 if (ufid_present)
1152 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1153 else
1154 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1155 if (unlikely(!flow)) {
1156 error = -ENOENT;
1157 goto err_unlock_ovs;
1158 }
1159
1160 /* Update actions, if present. */
1161 if (likely(acts)) {
1162 old_acts = ovsl_dereference(flow->sf_acts);
1163 rcu_assign_pointer(flow->sf_acts, acts);
1164
1165 if (unlikely(reply)) {
1166 error = ovs_flow_cmd_fill_info(flow,
1167 ovs_header->dp_ifindex,
1168 reply, info->snd_portid,
1169 info->snd_seq, 0,
1170 OVS_FLOW_CMD_NEW,
1171 ufid_flags);
1172 BUG_ON(error < 0);
1173 }
1174 } else {
1175 /* Could not allocate the reply before locking without acts. */
1176 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1177 info, OVS_FLOW_CMD_NEW, false,
1178 ufid_flags);
1179
1180 if (IS_ERR(reply)) {
1181 error = PTR_ERR(reply);
1182 goto err_unlock_ovs;
1183 }
1184 }
1185
1186 /* Clear stats. */
1187 if (a[OVS_FLOW_ATTR_CLEAR])
1188 ovs_flow_stats_clear(flow);
1189 ovs_unlock();
1190
1191 if (reply)
1192 ovs_notify(&dp_flow_genl_family, reply, info);
1193 if (old_acts)
1194 ovs_nla_free_flow_actions_rcu(old_acts);
1195
1196 return 0;
1197
1198 err_unlock_ovs:
1199 ovs_unlock();
1200 kfree_skb(reply);
1201 err_kfree_acts:
1202 ovs_nla_free_flow_actions(acts);
1203 error:
1204 return error;
1205 }
1206
1207 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1208 {
1209 struct nlattr **a = info->attrs;
1210 struct ovs_header *ovs_header = info->userhdr;
1211 struct net *net = sock_net(skb->sk);
1212 struct sw_flow_key key;
1213 struct sk_buff *reply;
1214 struct sw_flow *flow;
1215 struct datapath *dp;
1216 struct sw_flow_match match;
1217 struct sw_flow_id ufid;
1218 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1219 int err = 0;
1220 bool log = !a[OVS_FLOW_ATTR_PROBE];
1221 bool ufid_present;
1222
1223 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1224 if (a[OVS_FLOW_ATTR_KEY]) {
1225 ovs_match_init(&match, &key, NULL);
1226 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1227 log);
1228 } else if (!ufid_present) {
1229 OVS_NLERR(log,
1230 "Flow get message rejected, Key attribute missing.");
1231 err = -EINVAL;
1232 }
1233 if (err)
1234 return err;
1235
1236 ovs_lock();
1237 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1238 if (!dp) {
1239 err = -ENODEV;
1240 goto unlock;
1241 }
1242
1243 if (ufid_present)
1244 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1245 else
1246 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1247 if (!flow) {
1248 err = -ENOENT;
1249 goto unlock;
1250 }
1251
1252 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1253 OVS_FLOW_CMD_NEW, true, ufid_flags);
1254 if (IS_ERR(reply)) {
1255 err = PTR_ERR(reply);
1256 goto unlock;
1257 }
1258
1259 ovs_unlock();
1260 return genlmsg_reply(reply, info);
1261 unlock:
1262 ovs_unlock();
1263 return err;
1264 }
1265
1266 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1267 {
1268 struct nlattr **a = info->attrs;
1269 struct ovs_header *ovs_header = info->userhdr;
1270 struct net *net = sock_net(skb->sk);
1271 struct sw_flow_key key;
1272 struct sk_buff *reply;
1273 struct sw_flow *flow = NULL;
1274 struct datapath *dp;
1275 struct sw_flow_match match;
1276 struct sw_flow_id ufid;
1277 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1278 int err;
1279 bool log = !a[OVS_FLOW_ATTR_PROBE];
1280 bool ufid_present;
1281
1282 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1283 if (a[OVS_FLOW_ATTR_KEY]) {
1284 ovs_match_init(&match, &key, NULL);
1285 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1286 NULL, log);
1287 if (unlikely(err))
1288 return err;
1289 }
1290
1291 ovs_lock();
1292 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1293 if (unlikely(!dp)) {
1294 err = -ENODEV;
1295 goto unlock;
1296 }
1297
1298 if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1299 err = ovs_flow_tbl_flush(&dp->table);
1300 goto unlock;
1301 }
1302
1303 if (ufid_present)
1304 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1305 else
1306 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1307 if (unlikely(!flow)) {
1308 err = -ENOENT;
1309 goto unlock;
1310 }
1311
1312 ovs_flow_tbl_remove(&dp->table, flow);
1313 ovs_unlock();
1314
1315 reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1316 &flow->id, info, false, ufid_flags);
1317 if (likely(reply)) {
1318 if (likely(!IS_ERR(reply))) {
1319 rcu_read_lock(); /* To keep the RCU checker happy. */
1320 err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1321 reply, info->snd_portid,
1322 info->snd_seq, 0,
1323 OVS_FLOW_CMD_DEL,
1324 ufid_flags);
1325 rcu_read_unlock();
1326 BUG_ON(err < 0);
1327
1328 ovs_notify(&dp_flow_genl_family, reply, info);
1329 } else {
1330 netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1331 }
1332 }
1333
1334 ovs_flow_free(flow, true);
1335 return 0;
1336 unlock:
1337 ovs_unlock();
1338 return err;
1339 }
1340
1341 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1342 {
1343 struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1344 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1345 struct table_instance *ti;
1346 struct datapath *dp;
1347 u32 ufid_flags;
1348 int err;
1349
1350 err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
1351 OVS_FLOW_ATTR_MAX, flow_policy);
1352 if (err)
1353 return err;
1354 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1355
1356 rcu_read_lock();
1357 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1358 if (!dp) {
1359 rcu_read_unlock();
1360 return -ENODEV;
1361 }
1362
1363 ti = rcu_dereference(dp->table.ti);
1364 for (;;) {
1365 struct sw_flow *flow;
1366 u32 bucket, obj;
1367
1368 bucket = cb->args[0];
1369 obj = cb->args[1];
1370 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1371 if (!flow)
1372 break;
1373
1374 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1375 NETLINK_CB(cb->skb).portid,
1376 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1377 OVS_FLOW_CMD_NEW, ufid_flags) < 0)
1378 break;
1379
1380 cb->args[0] = bucket;
1381 cb->args[1] = obj;
1382 }
1383 rcu_read_unlock();
1384 return skb->len;
1385 }
1386
1387 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1388 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1389 [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1390 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1391 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1392 [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1393 [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1394 [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1395 };
1396
1397 static const struct genl_ops dp_flow_genl_ops[] = {
1398 { .cmd = OVS_FLOW_CMD_NEW,
1399 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1400 .policy = flow_policy,
1401 .doit = ovs_flow_cmd_new
1402 },
1403 { .cmd = OVS_FLOW_CMD_DEL,
1404 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1405 .policy = flow_policy,
1406 .doit = ovs_flow_cmd_del
1407 },
1408 { .cmd = OVS_FLOW_CMD_GET,
1409 .flags = 0, /* OK for unprivileged users. */
1410 .policy = flow_policy,
1411 .doit = ovs_flow_cmd_get,
1412 .dumpit = ovs_flow_cmd_dump
1413 },
1414 { .cmd = OVS_FLOW_CMD_SET,
1415 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1416 .policy = flow_policy,
1417 .doit = ovs_flow_cmd_set,
1418 },
1419 };
1420
1421 static struct genl_family dp_flow_genl_family = {
1422 .id = GENL_ID_GENERATE,
1423 .hdrsize = sizeof(struct ovs_header),
1424 .name = OVS_FLOW_FAMILY,
1425 .version = OVS_FLOW_VERSION,
1426 .maxattr = OVS_FLOW_ATTR_MAX,
1427 .netnsok = true,
1428 .parallel_ops = true,
1429 .ops = dp_flow_genl_ops,
1430 .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1431 .mcgrps = &ovs_dp_flow_multicast_group,
1432 .n_mcgrps = 1,
1433 };
1434
1435 static size_t ovs_dp_cmd_msg_size(void)
1436 {
1437 size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1438
1439 msgsize += nla_total_size(IFNAMSIZ);
1440 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1441 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1442 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1443
1444 return msgsize;
1445 }
1446
1447 /* Called with ovs_mutex. */
1448 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1449 u32 portid, u32 seq, u32 flags, u8 cmd)
1450 {
1451 struct ovs_header *ovs_header;
1452 struct ovs_dp_stats dp_stats;
1453 struct ovs_dp_megaflow_stats dp_megaflow_stats;
1454 int err;
1455
1456 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1457 flags, cmd);
1458 if (!ovs_header)
1459 goto error;
1460
1461 ovs_header->dp_ifindex = get_dpifindex(dp);
1462
1463 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1464 if (err)
1465 goto nla_put_failure;
1466
1467 get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1468 if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1469 &dp_stats, OVS_DP_ATTR_PAD))
1470 goto nla_put_failure;
1471
1472 if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1473 sizeof(struct ovs_dp_megaflow_stats),
1474 &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1475 goto nla_put_failure;
1476
1477 if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1478 goto nla_put_failure;
1479
1480 genlmsg_end(skb, ovs_header);
1481 return 0;
1482
1483 nla_put_failure:
1484 genlmsg_cancel(skb, ovs_header);
1485 error:
1486 return -EMSGSIZE;
1487 }
1488
1489 static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1490 {
1491 return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1492 }
1493
1494 /* Called with rcu_read_lock or ovs_mutex. */
1495 static struct datapath *lookup_datapath(struct net *net,
1496 const struct ovs_header *ovs_header,
1497 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1498 {
1499 struct datapath *dp;
1500
1501 if (!a[OVS_DP_ATTR_NAME])
1502 dp = get_dp(net, ovs_header->dp_ifindex);
1503 else {
1504 struct vport *vport;
1505
1506 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1507 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1508 }
1509 return dp ? dp : ERR_PTR(-ENODEV);
1510 }
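/* Editorial note: e.g. a request naming "dp0" (a hypothetical datapath
 * name) resolves through that name's local vport, while a request without
 * OVS_DP_ATTR_NAME falls back to the dp_ifindex in the ovs_header.
 */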
1511
1512 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1513 {
1514 struct datapath *dp;
1515
1516 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1517 if (IS_ERR(dp))
1518 return;
1519
1520 WARN(dp->user_features, "Dropping previously announced user features\n");
1521 dp->user_features = 0;
1522 }
1523
1524 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1525 {
1526 if (a[OVS_DP_ATTR_USER_FEATURES])
1527 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1528 }
1529
1530 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1531 {
1532 struct nlattr **a = info->attrs;
1533 struct vport_parms parms;
1534 struct sk_buff *reply;
1535 struct datapath *dp;
1536 struct vport *vport;
1537 struct ovs_net *ovs_net;
1538 int err, i;
1539
1540 err = -EINVAL;
1541 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1542 goto err;
1543
1544 reply = ovs_dp_cmd_alloc_info();
1545 if (!reply)
1546 return -ENOMEM;
1547
1548 err = -ENOMEM;
1549 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1550 if (dp == NULL)
1551 goto err_free_reply;
1552
1553 ovs_dp_set_net(dp, sock_net(skb->sk));
1554
1555 /* Allocate table. */
1556 err = ovs_flow_tbl_init(&dp->table);
1557 if (err)
1558 goto err_free_dp;
1559
1560 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1561 if (!dp->stats_percpu) {
1562 err = -ENOMEM;
1563 goto err_destroy_table;
1564 }
1565
1566 dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1567 GFP_KERNEL);
1568 if (!dp->ports) {
1569 err = -ENOMEM;
1570 goto err_destroy_percpu;
1571 }
1572
1573 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1574 INIT_HLIST_HEAD(&dp->ports[i]);
1575
1576 /* Set up our datapath device. */
1577 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1578 parms.type = OVS_VPORT_TYPE_INTERNAL;
1579 parms.options = NULL;
1580 parms.dp = dp;
1581 parms.port_no = OVSP_LOCAL;
1582 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1583
1584 ovs_dp_change(dp, a);
1585
1586 /* So far only local changes have been made; now we need the lock. */
1587 ovs_lock();
1588
1589 vport = new_vport(&parms);
1590 if (IS_ERR(vport)) {
1591 err = PTR_ERR(vport);
1592 if (err == -EBUSY)
1593 err = -EEXIST;
1594
1595 if (err == -EEXIST) {
1596 /* An outdated user space instance that does not understand
1597 * the concept of user_features has attempted to create a new
1598 * datapath and is likely to reuse it. Drop all user features.
1599 */
1600 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1601 ovs_dp_reset_user_features(skb, info);
1602 }
1603
1604 goto err_destroy_ports_array;
1605 }
1606
1607 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1608 info->snd_seq, 0, OVS_DP_CMD_NEW);
1609 BUG_ON(err < 0);
1610
1611 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1612 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1613
1614 ovs_unlock();
1615
1616 ovs_notify(&dp_datapath_genl_family, reply, info);
1617 return 0;
1618
1619 err_destroy_ports_array:
1620 ovs_unlock();
1621 kfree(dp->ports);
1622 err_destroy_percpu:
1623 free_percpu(dp->stats_percpu);
1624 err_destroy_table:
1625 ovs_flow_tbl_destroy(&dp->table);
1626 err_free_dp:
1627 kfree(dp);
1628 err_free_reply:
1629 kfree_skb(reply);
1630 err:
1631 return err;
1632 }
1633
1634 /* Called with ovs_mutex. */
1635 static void __dp_destroy(struct datapath *dp)
1636 {
1637 int i;
1638
1639 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1640 struct vport *vport;
1641 struct hlist_node *n;
1642
1643 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1644 if (vport->port_no != OVSP_LOCAL)
1645 ovs_dp_detach_port(vport);
1646 }
1647
1648 list_del_rcu(&dp->list_node);
1649
1650 /* OVSP_LOCAL is the datapath's internal port. We need to make sure that
1651 * all ports in the datapath are destroyed before the datapath is freed.
1652 */
1653 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1654
1655 /* Destroy the datapath, including its flow table, after an RCU grace period. */
1656 call_rcu(&dp->rcu, destroy_dp_rcu);
1657 }
1658
1659 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1660 {
1661 struct sk_buff *reply;
1662 struct datapath *dp;
1663 int err;
1664
1665 reply = ovs_dp_cmd_alloc_info();
1666 if (!reply)
1667 return -ENOMEM;
1668
1669 ovs_lock();
1670 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1671 err = PTR_ERR(dp);
1672 if (IS_ERR(dp))
1673 goto err_unlock_free;
1674
1675 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1676 info->snd_seq, 0, OVS_DP_CMD_DEL);
1677 BUG_ON(err < 0);
1678
1679 __dp_destroy(dp);
1680 ovs_unlock();
1681
1682 ovs_notify(&dp_datapath_genl_family, reply, info);
1683
1684 return 0;
1685
1686 err_unlock_free:
1687 ovs_unlock();
1688 kfree_skb(reply);
1689 return err;
1690 }
1691
1692 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1693 {
1694 struct sk_buff *reply;
1695 struct datapath *dp;
1696 int err;
1697
1698 reply = ovs_dp_cmd_alloc_info();
1699 if (!reply)
1700 return -ENOMEM;
1701
1702 ovs_lock();
1703 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1704 err = PTR_ERR(dp);
1705 if (IS_ERR(dp))
1706 goto err_unlock_free;
1707
1708 ovs_dp_change(dp, info->attrs);
1709
1710 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1711 info->snd_seq, 0, OVS_DP_CMD_NEW);
1712 BUG_ON(err < 0);
1713
1714 ovs_unlock();
1715 ovs_notify(&dp_datapath_genl_family, reply, info);
1716
1717 return 0;
1718
1719 err_unlock_free:
1720 ovs_unlock();
1721 kfree_skb(reply);
1722 return err;
1723 }
1724
1725 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1726 {
1727 struct sk_buff *reply;
1728 struct datapath *dp;
1729 int err;
1730
1731 reply = ovs_dp_cmd_alloc_info();
1732 if (!reply)
1733 return -ENOMEM;
1734
1735 ovs_lock();
1736 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1737 if (IS_ERR(dp)) {
1738 err = PTR_ERR(dp);
1739 goto err_unlock_free;
1740 }
1741 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1742 info->snd_seq, 0, OVS_DP_CMD_NEW);
1743 BUG_ON(err < 0);
1744 ovs_unlock();
1745
1746 return genlmsg_reply(reply, info);
1747
1748 err_unlock_free:
1749 ovs_unlock();
1750 kfree_skb(reply);
1751 return err;
1752 }
1753
1754 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1755 {
1756 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1757 struct datapath *dp;
1758 int skip = cb->args[0];
1759 int i = 0;
1760
1761 ovs_lock();
1762 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1763 if (i >= skip &&
1764 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1765 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1766 OVS_DP_CMD_NEW) < 0)
1767 break;
1768 i++;
1769 }
1770 ovs_unlock();
1771
1772 cb->args[0] = i;
1773
1774 return skb->len;
1775 }
1776
1777 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1778 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1779 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1780 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1781 };
1782
1783 static const struct genl_ops dp_datapath_genl_ops[] = {
1784 { .cmd = OVS_DP_CMD_NEW,
1785 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1786 .policy = datapath_policy,
1787 .doit = ovs_dp_cmd_new
1788 },
1789 { .cmd = OVS_DP_CMD_DEL,
1790 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1791 .policy = datapath_policy,
1792 .doit = ovs_dp_cmd_del
1793 },
1794 { .cmd = OVS_DP_CMD_GET,
1795 .flags = 0, /* OK for unprivileged users. */
1796 .policy = datapath_policy,
1797 .doit = ovs_dp_cmd_get,
1798 .dumpit = ovs_dp_cmd_dump
1799 },
1800 { .cmd = OVS_DP_CMD_SET,
1801 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1802 .policy = datapath_policy,
1803 .doit = ovs_dp_cmd_set,
1804 },
1805 };
1806
1807 static struct genl_family dp_datapath_genl_family = {
1808 .id = GENL_ID_GENERATE,
1809 .hdrsize = sizeof(struct ovs_header),
1810 .name = OVS_DATAPATH_FAMILY,
1811 .version = OVS_DATAPATH_VERSION,
1812 .maxattr = OVS_DP_ATTR_MAX,
1813 .netnsok = true,
1814 .parallel_ops = true,
1815 .ops = dp_datapath_genl_ops,
1816 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1817 .mcgrps = &ovs_dp_datapath_multicast_group,
1818 .n_mcgrps = 1,
1819 };
1820
1821 /* Called with ovs_mutex or RCU read lock. */
1822 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1823 u32 portid, u32 seq, u32 flags, u8 cmd)
1824 {
1825 struct ovs_header *ovs_header;
1826 struct ovs_vport_stats vport_stats;
1827 int err;
1828
1829 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1830 flags, cmd);
1831 if (!ovs_header)
1832 return -EMSGSIZE;
1833
1834 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1835
1836 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1837 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1838 nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1839 ovs_vport_name(vport)))
1840 goto nla_put_failure;
1841
1842 ovs_vport_get_stats(vport, &vport_stats);
1843 if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1844 sizeof(struct ovs_vport_stats), &vport_stats,
1845 OVS_VPORT_ATTR_PAD))
1846 goto nla_put_failure;
1847
1848 if (ovs_vport_get_upcall_portids(vport, skb))
1849 goto nla_put_failure;
1850
1851 err = ovs_vport_get_options(vport, skb);
1852 if (err == -EMSGSIZE)
1853 goto error;
1854
1855 genlmsg_end(skb, ovs_header);
1856 return 0;
1857
1858 nla_put_failure:
1859 err = -EMSGSIZE;
1860 error:
1861 genlmsg_cancel(skb, ovs_header);
1862 return err;
1863 }
1864
1865 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1866 {
1867 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1868 }
1869
1870 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1871 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1872 u32 seq, u8 cmd)
1873 {
1874 struct sk_buff *skb;
1875 int retval;
1876
1877 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1878 if (!skb)
1879 return ERR_PTR(-ENOMEM);
1880
1881 retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1882 BUG_ON(retval < 0);
1883
1884 return skb;
1885 }
1886
1887 /* Called with ovs_mutex or RCU read lock. */
1888 static struct vport *lookup_vport(struct net *net,
1889 const struct ovs_header *ovs_header,
1890 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1891 {
1892 struct datapath *dp;
1893 struct vport *vport;
1894
1895 if (a[OVS_VPORT_ATTR_NAME]) {
1896 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1897 if (!vport)
1898 return ERR_PTR(-ENODEV);
1899 if (ovs_header->dp_ifindex &&
1900 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1901 return ERR_PTR(-ENODEV);
1902 return vport;
1903 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1904 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1905
1906 if (port_no >= DP_MAX_PORTS)
1907 return ERR_PTR(-EFBIG);
1908
1909 dp = get_dp(net, ovs_header->dp_ifindex);
1910 if (!dp)
1911 return ERR_PTR(-ENODEV);
1912
1913 vport = ovs_vport_ovsl_rcu(dp, port_no);
1914 if (!vport)
1915 return ERR_PTR(-ENODEV);
1916 return vport;
1917 } else
1918 return ERR_PTR(-EINVAL);
1919 }
1920
1921 /* Called with ovs_mutex */
1922 static void update_headroom(struct datapath *dp)
1923 {
1924 unsigned int dev_headroom, max_headroom = 0;
1925 struct net_device *dev;
1926 struct vport *vport;
1927 int i;
1928
1929 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1930 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1931 dev = vport->dev;
1932 dev_headroom = netdev_get_fwd_headroom(dev);
1933 if (dev_headroom > max_headroom)
1934 max_headroom = dev_headroom;
1935 }
1936 }
1937
1938 dp->max_headroom = max_headroom;
1939 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1940 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
1941 netdev_set_rx_headroom(vport->dev, max_headroom);
1942 }
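/* Editorial example: with vports whose devices request forwarding
 * headrooms of 0, 32 and 64 bytes, dp->max_headroom becomes 64 and every
 * vport's rx headroom is set to 64.
 */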
1943
1944 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1945 {
1946 struct nlattr **a = info->attrs;
1947 struct ovs_header *ovs_header = info->userhdr;
1948 struct vport_parms parms;
1949 struct sk_buff *reply;
1950 struct vport *vport;
1951 struct datapath *dp;
1952 u32 port_no;
1953 int err;
1954
1955 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1956 !a[OVS_VPORT_ATTR_UPCALL_PID])
1957 return -EINVAL;
1958
1959 port_no = a[OVS_VPORT_ATTR_PORT_NO]
1960 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1961 if (port_no >= DP_MAX_PORTS)
1962 return -EFBIG;
1963
1964 reply = ovs_vport_cmd_alloc_info();
1965 if (!reply)
1966 return -ENOMEM;
1967
1968 ovs_lock();
1969 restart:
1970 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1971 err = -ENODEV;
1972 if (!dp)
1973 goto exit_unlock_free;
1974
1975 if (port_no) {
1976 vport = ovs_vport_ovsl(dp, port_no);
1977 err = -EBUSY;
1978 if (vport)
1979 goto exit_unlock_free;
1980 } else {
1981 for (port_no = 1; ; port_no++) {
1982 if (port_no >= DP_MAX_PORTS) {
1983 err = -EFBIG;
1984 goto exit_unlock_free;
1985 }
1986 vport = ovs_vport_ovsl(dp, port_no);
1987 if (!vport)
1988 break;
1989 }
1990 }
1991
1992 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1993 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1994 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1995 parms.dp = dp;
1996 parms.port_no = port_no;
1997 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1998
1999 vport = new_vport(&parms);
2000 err = PTR_ERR(vport);
2001 if (IS_ERR(vport)) {
2002 if (err == -EAGAIN)
2003 goto restart;
2004 goto exit_unlock_free;
2005 }
2006
2007 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2008 info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2009
2010 if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
2011 update_headroom(dp);
2012 else
2013 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2014
2015 BUG_ON(err < 0);
2016 ovs_unlock();
2017
2018 ovs_notify(&dp_vport_genl_family, reply, info);
2019 return 0;
2020
2021 exit_unlock_free:
2022 ovs_unlock();
2023 kfree_skb(reply);
2024 return err;
2025 }
2026
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];

		err = ovs_vport_set_upcall_portids(vport, ids);
		if (err)
			goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

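/* OVS_VPORT_CMD_DEL handler.  The local port (OVSP_LOCAL) cannot be deleted
 * on its own, it only goes away with its datapath.  If the departing port
 * was the one dictating dp->max_headroom, the headroom of the remaining
 * ports is recomputed after the detach.
 */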
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	bool must_update_headroom = false;
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_DEL);
	BUG_ON(err < 0);

	/* The vport deletion may trigger a datapath headroom update. */
	dp = vport->dp;
	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
		must_update_headroom = true;
	netdev_reset_rx_headroom(vport->dev);
	ovs_dp_detach_port(vport);

	if (must_update_headroom)
		update_headroom(dp);
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

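/* OVS_VPORT_CMD_GET handler.  Purely a read operation, so it runs under
 * rcu_read_lock() instead of ovs_lock and unicasts the reply to the
 * requester rather than notifying the multicast group.
 */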
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;
	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);
	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock_free:
	rcu_read_unlock();
	kfree_skb(reply);
	return err;
}

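/* Dump callback for OVS_VPORT_CMD_GET.  cb->args[0] and cb->args[1] carry
 * the hash bucket and the offset within that bucket reached by the previous
 * pass, so a dump that ran out of skb space resumes where it stopped.
 */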
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

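/* Attribute policy: validated by the genetlink core before the ops below
 * are invoked, so malformed attributes never reach the handlers.
 */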
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

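/* GENL_UNS_ADMIN_PERM requires CAP_NET_ADMIN in the user namespace owning
 * the socket's network namespace, which is what allows unprivileged
 * containers to manage their own datapaths.
 */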
static const struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

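/* The vport generic netlink family.  With GENL_ID_GENERATE the genetlink
 * core assigns a free family id at registration time and userspace resolves
 * it by the OVS_VPORT_FAMILY name.  parallel_ops lets handlers run
 * concurrently; that is safe here because every handler takes ovs_lock or
 * rcu_read_lock itself.
 */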
struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_vport_genl_ops,
	.n_ops = ARRAY_SIZE(dp_vport_genl_ops),
	.mcgrps = &ovs_dp_vport_multicast_group,
	.n_mcgrps = 1,
};

static struct genl_family * const dp_genl_families[] = {
	&dp_datapath_genl_family,
	&dp_vport_genl_family,
	&dp_flow_genl_family,
	&dp_packet_genl_family,
};

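/* Unregister the first n_families entries of dp_genl_families[].  Called
 * with ARRAY_SIZE(dp_genl_families) on module unload, and with the number
 * of successful registrations to unwind a failure in dp_register_genl().
 */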
static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i]);
}

static int dp_register_genl(void)
{
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		err = genl_register_family(dp_genl_families[i]);
		if (err)
			goto error;
	}

	return 0;

error:
	dp_unregister_genl(i);
	return err;
}

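/* Per-namespace init: each net namespace keeps its own datapath list, a
 * work item used by the netdev notifier to detach vports whose underlying
 * device vanished, and its own conntrack state.
 */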
static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	ovs_ct_init(net);
	return 0;
}

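/* Collect on @head the internal vports, across all datapaths of @net, whose
 * backing device has been moved into the dying namespace @dnet, so that
 * ovs_exit_net() can detach them before the namespace disappears.
 */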
static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
					    struct list_head *head)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		int i;

		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
			struct vport *vport;

			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
					continue;

				if (dev_net(vport->dev) == dnet)
					list_add(&vport->detach_list, head);
			}
		}
	}
}

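/* Per-namespace exit: destroy every datapath owned by @dnet, then walk all
 * remaining namespaces (under rtnl_lock, as for_each_net() requires) to
 * detach internal vports that were moved into @dnet from elsewhere.
 */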
static void __net_exit ovs_exit_net(struct net *dnet)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
	struct vport *vport, *vport_next;
	struct net *net;
	LIST_HEAD(head);

	ovs_ct_exit(dnet);
	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

	rtnl_lock();
	for_each_net(net)
		list_vports_from_net(net, dnet, &head);
	rtnl_unlock();

	/* Detach all vports whose backing device belongs to the exiting
	 * namespace. */
	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
		list_del(&vport->detach_list);
		ovs_dp_detach_port(vport);
	}

	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

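/* Module init.  The subsystems are brought up in dependency order; each
 * failure label unwinds exactly the steps that already succeeded, in
 * reverse.  The BUILD_BUG_ON ensures struct ovs_skb_cb still fits in the
 * skb control buffer it is stashed in.
 */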
static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = action_fifos_init();
	if (err)
		goto error;

	err = ovs_internal_dev_rtnl_link_register();
	if (err)
		goto error_action_fifos_exit;

	err = ovs_flow_init();
	if (err)
		goto error_unreg_rtnl_link;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = ovs_netdev_init();
	if (err)
		goto error_unreg_notifier;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_netdev;

	return 0;

error_unreg_netdev:
	ovs_netdev_exit();
error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_unreg_rtnl_link:
	ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
	action_fifos_exit();
error:
	return err;
}

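/* Module exit: the mirror image of dp_init().  rcu_barrier() waits for
 * outstanding RCU callbacks (e.g. deferred flow frees) to run before the
 * flow and vport machinery they rely on is torn down.
 */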
static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	ovs_netdev_exit();
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_internal_dev_rtnl_link_unregister();
	action_fifos_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");