1 /*
2 * Copyright (c) 2007-2015 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/version.h>
40 #include <linux/ethtool.h>
41 #include <linux/wait.h>
42 #include <asm/div64.h>
43 #include <linux/highmem.h>
44 #include <linux/netfilter_bridge.h>
45 #include <linux/netfilter_ipv4.h>
46 #include <linux/inetdevice.h>
47 #include <linux/list.h>
48 #include <linux/openvswitch.h>
49 #include <linux/rculist.h>
50 #include <linux/dmi.h>
51 #include <net/genetlink.h>
52 #include <net/net_namespace.h>
53 #include <net/netns/generic.h>
54 #include <net/nsh.h>
55
56 #include "datapath.h"
57 #include "conntrack.h"
58 #include "flow.h"
59 #include "flow_table.h"
60 #include "flow_netlink.h"
61 #include "meter.h"
62 #include "gso.h"
63 #include "vport-internal_dev.h"
64 #include "vport-netdev.h"
65
66 unsigned int ovs_net_id __read_mostly;
67
68 static struct genl_family dp_packet_genl_family;
69 static struct genl_family dp_flow_genl_family;
70 static struct genl_family dp_datapath_genl_family;
71
72 static const struct nla_policy flow_policy[];
73
74 static struct genl_multicast_group ovs_dp_flow_multicast_group = {
75 .name = OVS_FLOW_MCGROUP
76 };
77
78 static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
79 .name = OVS_DATAPATH_MCGROUP
80 };
81
82 struct genl_multicast_group ovs_dp_vport_multicast_group = {
83 .name = OVS_VPORT_MCGROUP
84 };
85
86 /* Check whether we need to build a reply message.
87 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
88 */
89 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
90 unsigned int group)
91 {
92 return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
93 genl_has_listeners(family, genl_info_net(info), group);
94 }
95
96 static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
97 struct sk_buff *skb, struct genl_info *info)
98 {
99 genl_notify(family, skb, info, GROUP_ID(grp), GFP_KERNEL);
100 }
101
102 /**
103 * DOC: Locking:
104 *
105 * All writes to device state (add/remove datapath, add/remove port, set
106 * operations on vports, etc.) and to other state (flow table
107 * modifications, setting miscellaneous datapath parameters, etc.) are
108 * protected by ovs_lock.
109 *
110 * Reads are protected by RCU.
111 *
112 * There are a few special cases (mostly stats) that have their own
113 * synchronization but they nest under all of the above and don't interact with
114 * each other.
115 *
116 * The RTNL lock nests inside ovs_mutex.
117 */
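/* A minimal usage sketch of the scheme above (illustrative only; the
 * real call sites are the genl command handlers further down):
 *
 *	ovs_lock();
 *	dp = get_dp(net, dp_ifindex);		writer side, under ovs_mutex
 *	... modify datapath, vport or flow table state ...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	dp = get_dp_rcu(net, dp_ifindex);	reader side, under RCU
 *	... read-only lookups ...
 *	rcu_read_unlock();
 *
 * Helpers such as ovsl_dereference() use lockdep_ovsl_is_held() below to
 * assert that ovs_mutex is really held on the writer side.
 */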
118
119 static DEFINE_MUTEX(ovs_mutex);
120
121 void ovs_lock(void)
122 {
123 mutex_lock(&ovs_mutex);
124 }
125
126 void ovs_unlock(void)
127 {
128 mutex_unlock(&ovs_mutex);
129 }
130
131 #ifdef CONFIG_LOCKDEP
132 int lockdep_ovsl_is_held(void)
133 {
134 if (debug_locks)
135 return lockdep_is_held(&ovs_mutex);
136 else
137 return 1;
138 }
139 #endif
140
141 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
142 const struct sw_flow_key *,
143 const struct dp_upcall_info *,
144 uint32_t cutlen);
145 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
146 const struct sw_flow_key *,
147 const struct dp_upcall_info *,
148 uint32_t cutlen);
149
150 /* Must be called with rcu_read_lock or ovs_mutex. */
151 const char *ovs_dp_name(const struct datapath *dp)
152 {
153 struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
154 return ovs_vport_name(vport);
155 }
156
157 static int get_dpifindex(const struct datapath *dp)
158 {
159 struct vport *local;
160 int ifindex;
161
162 rcu_read_lock();
163
164 local = ovs_vport_rcu(dp, OVSP_LOCAL);
165 if (local)
166 ifindex = local->dev->ifindex;
167 else
168 ifindex = 0;
169
170 rcu_read_unlock();
171
172 return ifindex;
173 }
174
175 static void destroy_dp_rcu(struct rcu_head *rcu)
176 {
177 struct datapath *dp = container_of(rcu, struct datapath, rcu);
178
179 ovs_flow_tbl_destroy(&dp->table);
180 free_percpu(dp->stats_percpu);
181 kfree(dp->ports);
182 ovs_meters_exit(dp);
183 kfree(dp);
184 }
185
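/* DP_VPORT_HASH_BUCKETS is a power of two, so the mask below is
 * equivalent to port_no % DP_VPORT_HASH_BUCKETS; e.g. with 1024 buckets,
 * port 1030 maps to bucket 6.
 */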
186 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
187 u16 port_no)
188 {
189 return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
190 }
191
192 /* Called with ovs_mutex or RCU read lock. */
193 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
194 {
195 struct vport *vport;
196 struct hlist_head *head;
197
198 head = vport_hash_bucket(dp, port_no);
199 hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
200 if (vport->port_no == port_no)
201 return vport;
202 }
203 return NULL;
204 }
205
206 /* Called with ovs_mutex. */
207 static struct vport *new_vport(const struct vport_parms *parms)
208 {
209 struct vport *vport;
210
211 vport = ovs_vport_add(parms);
212 if (!IS_ERR(vport)) {
213 struct datapath *dp = parms->dp;
214 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
215
216 hlist_add_head_rcu(&vport->dp_hash_node, head);
217 }
218 return vport;
219 }
220
221 void ovs_dp_detach_port(struct vport *p)
222 {
223 ASSERT_OVSL();
224
225 /* First drop references to device. */
226 hlist_del_rcu(&p->dp_hash_node);
227
228 /* Then destroy it. */
229 ovs_vport_del(p);
230 }
231
232 /* Must be called with rcu_read_lock. */
233 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
234 {
235 const struct vport *p = OVS_CB(skb)->input_vport;
236 struct datapath *dp = p->dp;
237 struct sw_flow *flow;
238 struct sw_flow_actions *sf_acts;
239 struct dp_stats_percpu *stats;
240 u64 *stats_counter;
241 u32 n_mask_hit;
242
243 stats = this_cpu_ptr(dp->stats_percpu);
244
245 /* Look up flow. */
246 flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
247 &n_mask_hit);
248 if (unlikely(!flow)) {
249 struct dp_upcall_info upcall;
250 int error;
251
252 memset(&upcall, 0, sizeof(upcall));
253 upcall.cmd = OVS_PACKET_CMD_MISS;
254 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
255 upcall.mru = OVS_CB(skb)->mru;
256 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
257 if (unlikely(error))
258 kfree_skb(skb);
259 else
260 consume_skb(skb);
261 stats_counter = &stats->n_missed;
262 goto out;
263 }
264
265 ovs_flow_stats_update(flow, key->tp.flags, skb);
266 sf_acts = rcu_dereference(flow->sf_acts);
267 ovs_execute_actions(dp, skb, sf_acts, key);
268
269 stats_counter = &stats->n_hit;
270
271 out:
272 /* Update datapath statistics. */
273 u64_stats_update_begin(&stats->syncp);
274 (*stats_counter)++;
275 stats->n_mask_hit += n_mask_hit;
276 u64_stats_update_end(&stats->syncp);
277 }
278
279 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
280 const struct sw_flow_key *key,
281 const struct dp_upcall_info *upcall_info,
282 uint32_t cutlen)
283 {
284 struct dp_stats_percpu *stats;
285 int err;
286
287 if (upcall_info->portid == 0) {
288 err = -ENOTCONN;
289 goto err;
290 }
291
292 if (!skb_is_gso(skb))
293 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
294 else
295 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
296 if (err)
297 goto err;
298
299 return 0;
300
301 err:
302 stats = this_cpu_ptr(dp->stats_percpu);
303
304 u64_stats_update_begin(&stats->syncp);
305 stats->n_lost++;
306 u64_stats_update_end(&stats->syncp);
307
308 return err;
309 }
310
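/* Upcall path for GSO skbs: the packet is segmented in software and each
 * resulting segment is queued to userspace individually.  On kernels with
 * HAVE_SKB_GSO_UDP, UDP fragmentation offload needs extra care: the flow
 * key extracted from the original skb describes the first fragment, so
 * every segment after the first is reported with ip.frag set to
 * OVS_FRAG_TYPE_LATER.
 */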
311 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
312 const struct sw_flow_key *key,
313 const struct dp_upcall_info *upcall_info,
314 uint32_t cutlen)
315 {
316 #ifdef HAVE_SKB_GSO_UDP
317 unsigned int gso_type = skb_shinfo(skb)->gso_type;
318 struct sw_flow_key later_key;
319 #endif
320 struct sk_buff *segs, *nskb;
321 struct ovs_skb_cb ovs_cb;
322 int err;
323
324 ovs_cb = *OVS_CB(skb);
325 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
326 *OVS_CB(skb) = ovs_cb;
327 if (IS_ERR(segs))
328 return PTR_ERR(segs);
329 if (segs == NULL)
330 return -EINVAL;
331 #ifdef HAVE_SKB_GSO_UDP
332 if (gso_type & SKB_GSO_UDP) {
333 /* The initial flow key extracted by ovs_flow_key_extract()
334 * in this case is for a first fragment, so we need to
335 * properly mark later fragments.
336 */
337 later_key = *key;
338 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
339 }
340 #endif
341 /* Queue all of the segments. */
342 skb = segs;
343 do {
344 *OVS_CB(skb) = ovs_cb;
345 #ifdef HAVE_SKB_GSO_UDP
346 if (gso_type & SKB_GSO_UDP && skb != segs)
347 key = &later_key;
348 #endif
349 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
350 if (err)
351 break;
352
353 } while ((skb = skb->next));
354
355 /* Free all of the segments. */
356 skb = segs;
357 do {
358 nskb = skb->next;
359 if (err)
360 kfree_skb(skb);
361 else
362 consume_skb(skb);
363 } while ((skb = nskb));
364 return err;
365 }
366
367 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
368 unsigned int hdrlen, int actions_attrlen)
369 {
370 size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
371 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
372 + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
373 + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
374
375 /* OVS_PACKET_ATTR_USERDATA */
376 if (upcall_info->userdata)
377 size += NLA_ALIGN(upcall_info->userdata->nla_len);
378
379 /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
380 if (upcall_info->egress_tun_info)
381 size += nla_total_size(ovs_tun_key_attr_size());
382
383 /* OVS_PACKET_ATTR_ACTIONS */
384 if (upcall_info->actions_len)
385 size += nla_total_size(actions_attrlen);
386
387 /* OVS_PACKET_ATTR_MRU */
388 if (upcall_info->mru)
389 size += nla_total_size(sizeof(upcall_info->mru));
390
391 return size;
392 }
393
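/* Pad the last Netlink attribute out to NLA_ALIGNTO for userspace that
 * cannot handle unaligned messages (i.e. when OVS_DP_F_UNALIGNED is not
 * set).  For example, a 61-byte payload gets three zero bytes appended so
 * the attribute ends on a 4-byte boundary.
 */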
394 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
395 {
396 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
397 size_t plen = NLA_ALIGN(skb->len) - skb->len;
398
399 if (plen > 0)
400 skb_put_zero(skb, plen);
401 }
402 }
403
404 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
405 const struct sw_flow_key *key,
406 const struct dp_upcall_info *upcall_info,
407 uint32_t cutlen)
408 {
409 struct ovs_header *upcall;
410 struct sk_buff *nskb = NULL;
411 struct sk_buff *user_skb = NULL; /* to be queued to userspace */
412 struct nlattr *nla;
413 size_t len;
414 unsigned int hlen;
415 int err, dp_ifindex;
416
417 dp_ifindex = get_dpifindex(dp);
418 if (!dp_ifindex)
419 return -ENODEV;
420
421 if (skb_vlan_tag_present(skb)) {
422 nskb = skb_clone(skb, GFP_ATOMIC);
423 if (!nskb)
424 return -ENOMEM;
425
426 nskb = __vlan_hwaccel_push_inside(nskb);
427 if (!nskb)
428 return -ENOMEM;
429
430 skb = nskb;
431 }
432
433 if (nla_attr_size(skb->len) > USHRT_MAX) {
434 err = -EFBIG;
435 goto out;
436 }
437
438 /* Complete checksum if needed */
439 if (skb->ip_summed == CHECKSUM_PARTIAL &&
440 (err = skb_csum_hwoffload_help(skb, 0)))
441 goto out;
442
443 /* Older versions of OVS user space enforce alignment of the last
444 * Netlink attribute to NLA_ALIGNTO which would require extensive
445 * padding logic. Only perform zerocopy if padding is not required.
446 */
447 if (dp->user_features & OVS_DP_F_UNALIGNED)
448 hlen = skb_zerocopy_headlen(skb);
449 else
450 hlen = skb->len;
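/* With OVS_DP_F_UNALIGNED, skb_zerocopy() below copies only the linear
 * part (hlen) and attaches any paged data as fragments; otherwise
 * hlen == skb->len, the whole packet is copied linearly, and pad_packet()
 * later restores the alignment that older userspace expects.
 */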
451
452 len = upcall_msg_size(upcall_info, hlen - cutlen,
453 OVS_CB(skb)->acts_origlen);
454 user_skb = genlmsg_new(len, GFP_ATOMIC);
455 if (!user_skb) {
456 err = -ENOMEM;
457 goto out;
458 }
459
460 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
461 0, upcall_info->cmd);
462 if (!upcall) {
463 err = -EINVAL;
464 goto out;
465 }
466 upcall->dp_ifindex = dp_ifindex;
467
468 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
469 BUG_ON(err);
470
471 if (upcall_info->userdata)
472 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
473 nla_len(upcall_info->userdata),
474 nla_data(upcall_info->userdata));
475
476
477 if (upcall_info->egress_tun_info) {
478 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
479 if (!nla) {
480 err = -EMSGSIZE;
481 goto out;
482 }
483 err = ovs_nla_put_tunnel_info(user_skb,
484 upcall_info->egress_tun_info);
485 BUG_ON(err);
486 nla_nest_end(user_skb, nla);
487 }
488
489 if (upcall_info->actions_len) {
490 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
491 if (!nla) {
492 err = -EMSGSIZE;
493 goto out;
494 }
495 err = ovs_nla_put_actions(upcall_info->actions,
496 upcall_info->actions_len,
497 user_skb);
498 if (!err)
499 nla_nest_end(user_skb, nla);
500 else
501 nla_nest_cancel(user_skb, nla);
502 }
503
504 /* Add OVS_PACKET_ATTR_MRU */
505 if (upcall_info->mru) {
506 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
507 upcall_info->mru)) {
508 err = -ENOBUFS;
509 goto out;
510 }
511 pad_packet(dp, user_skb);
512 }
513
514 /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
515 if (cutlen > 0) {
516 if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
517 skb->len)) {
518 err = -ENOBUFS;
519 goto out;
520 }
521 pad_packet(dp, user_skb);
522 }
523
524 /* Only reserve room for the attribute header; the packet data is
525 * added in skb_zerocopy().
526 */
527 if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
528 err = -ENOBUFS;
529 goto out;
530 }
531 nla->nla_len = nla_attr_size(skb->len - cutlen);
532
533 err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
534 if (err)
535 goto out;
536
537 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
538 pad_packet(dp, user_skb);
539
540 ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
541
542 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
543 user_skb = NULL;
544 out:
545 if (err)
546 skb_tx_error(skb);
547 kfree_skb(user_skb);
548 kfree_skb(nskb);
549 return err;
550 }
551
552 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
553 {
554 struct ovs_header *ovs_header = info->userhdr;
555 struct net *net = sock_net(skb->sk);
556 struct nlattr **a = info->attrs;
557 struct sw_flow_actions *acts;
558 struct sk_buff *packet;
559 struct sw_flow *flow;
560 struct sw_flow_actions *sf_acts;
561 struct datapath *dp;
562 struct vport *input_vport;
563 u16 mru = 0;
564 int len;
565 int err;
566 bool log = !a[OVS_PACKET_ATTR_PROBE];
567
568 err = -EINVAL;
569 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
570 !a[OVS_PACKET_ATTR_ACTIONS])
571 goto err;
572
573 len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
574 packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
575 err = -ENOMEM;
576 if (!packet)
577 goto err;
578 skb_reserve(packet, NET_IP_ALIGN);
579
580 nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
581
582 /* Set packet's mru */
583 if (a[OVS_PACKET_ATTR_MRU]) {
584 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
585 packet->ignore_df = 1;
586 }
587 OVS_CB(packet)->mru = mru;
588
589 /* Build an sw_flow for sending this packet. */
590 flow = ovs_flow_alloc();
591 err = PTR_ERR(flow);
592 if (IS_ERR(flow))
593 goto err_kfree_skb;
594
595 err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
596 packet, &flow->key, log);
597 if (err)
598 goto err_flow_free;
599
600 err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
601 &flow->key, &acts, log);
602 if (err)
603 goto err_flow_free;
604
605 rcu_assign_pointer(flow->sf_acts, acts);
606 packet->priority = flow->key.phy.priority;
607 packet->mark = flow->key.phy.skb_mark;
608
609 rcu_read_lock();
610 dp = get_dp_rcu(net, ovs_header->dp_ifindex);
611 err = -ENODEV;
612 if (!dp)
613 goto err_unlock;
614
615 input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
616 if (!input_vport)
617 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
618
619 if (!input_vport)
620 goto err_unlock;
621
622 packet->dev = input_vport->dev;
623 OVS_CB(packet)->input_vport = input_vport;
624 sf_acts = rcu_dereference(flow->sf_acts);
625
626 local_bh_disable();
627 err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
628 local_bh_enable();
629 rcu_read_unlock();
630
631 ovs_flow_free(flow, false);
632 return err;
633
634 err_unlock:
635 rcu_read_unlock();
636 err_flow_free:
637 ovs_flow_free(flow, false);
638 err_kfree_skb:
639 kfree_skb(packet);
640 err:
641 return err;
642 }
643
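/* Note on the policy below: OVS_PACKET_ATTR_PACKET carries no explicit
 * type, so .len = ETH_HLEN acts as a minimum-length check; an injected
 * packet must contain at least a full Ethernet header.
 */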
644 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
645 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
646 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
647 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
648 [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
649 [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
650 };
651
652 static struct genl_ops dp_packet_genl_ops[] = {
653 { .cmd = OVS_PACKET_CMD_EXECUTE,
654 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
655 .policy = packet_policy,
656 .doit = ovs_packet_cmd_execute
657 }
658 };
659
660 static struct genl_family dp_packet_genl_family __ro_after_init = {
661 .hdrsize = sizeof(struct ovs_header),
662 .name = OVS_PACKET_FAMILY,
663 .version = OVS_PACKET_VERSION,
664 .maxattr = OVS_PACKET_ATTR_MAX,
665 .netnsok = true,
666 .parallel_ops = true,
667 .ops = dp_packet_genl_ops,
668 .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
669 .module = THIS_MODULE,
670 };
671
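/* Aggregate the per-CPU counters into a single snapshot.  The
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop below
 * re-reads a CPU's counters if a writer updated them concurrently, which
 * keeps the 64-bit counters consistent on 32-bit machines without a lock.
 */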
672 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
673 struct ovs_dp_megaflow_stats *mega_stats)
674 {
675 int i;
676
677 memset(mega_stats, 0, sizeof(*mega_stats));
678
679 stats->n_flows = ovs_flow_tbl_count(&dp->table);
680 mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
681
682 stats->n_hit = stats->n_missed = stats->n_lost = 0;
683
684 for_each_possible_cpu(i) {
685 const struct dp_stats_percpu *percpu_stats;
686 struct dp_stats_percpu local_stats;
687 unsigned int start;
688
689 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
690
691 do {
692 start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
693 local_stats = *percpu_stats;
694 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
695
696 stats->n_hit += local_stats.n_hit;
697 stats->n_missed += local_stats.n_missed;
698 stats->n_lost += local_stats.n_lost;
699 mega_stats->n_mask_hit += local_stats.n_mask_hit;
700 }
701 }
702
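/* Userspace can trim flow replies with OVS_FLOW_ATTR_UFID_FLAGS.  For
 * example, a dump request carrying
 * OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK | OVS_UFID_F_OMIT_ACTIONS
 * leaves only the flow identifier and statistics in each reply.
 */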
703 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
704 {
705 return ovs_identifier_is_ufid(sfid) &&
706 !(ufid_flags & OVS_UFID_F_OMIT_KEY);
707 }
708
709 static bool should_fill_mask(uint32_t ufid_flags)
710 {
711 return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
712 }
713
714 static bool should_fill_actions(uint32_t ufid_flags)
715 {
716 return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
717 }
718
719 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
720 const struct sw_flow_id *sfid,
721 uint32_t ufid_flags)
722 {
723 size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
724
725 /* OVS_FLOW_ATTR_UFID */
726 if (sfid && ovs_identifier_is_ufid(sfid))
727 len += nla_total_size(sfid->ufid_len);
728
729 /* OVS_FLOW_ATTR_KEY */
730 if (!sfid || should_fill_key(sfid, ufid_flags))
731 len += nla_total_size(ovs_key_attr_size());
732
733 /* OVS_FLOW_ATTR_MASK */
734 if (should_fill_mask(ufid_flags))
735 len += nla_total_size(ovs_key_attr_size());
736
737 /* OVS_FLOW_ATTR_ACTIONS */
738 if (should_fill_actions(ufid_flags))
739 len += nla_total_size(acts->orig_len);
740
741 return len
742 + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
743 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
744 + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
745 }
746
747 /* Called with ovs_mutex or RCU read lock. */
748 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
749 struct sk_buff *skb)
750 {
751 struct ovs_flow_stats stats;
752 __be16 tcp_flags;
753 unsigned long used;
754
755 ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
756
757 if (used &&
758 nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
759 OVS_FLOW_ATTR_PAD))
760 return -EMSGSIZE;
761
762 if (stats.n_packets &&
763 nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
764 sizeof(struct ovs_flow_stats), &stats,
765 OVS_FLOW_ATTR_PAD))
766 return -EMSGSIZE;
767
768 if ((u8)ntohs(tcp_flags) &&
769 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
770 return -EMSGSIZE;
771
772 return 0;
773 }
774
775 /* Called with ovs_mutex or RCU read lock. */
776 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
777 struct sk_buff *skb, int skb_orig_len)
778 {
779 struct nlattr *start;
780 int err;
781
782 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
783 * this is the first flow to be dumped into 'skb'. This is unusual for
784 * Netlink but individual action lists can be longer than
785 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
786 * The userspace caller can always fetch the actions separately if it
787 * really wants them. (Most userspace callers in fact don't care.)
788 *
789 * This can only fail for dump operations because the skb is always
790 * properly sized for single flows.
791 */
792 start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
793 if (start) {
794 const struct sw_flow_actions *sf_acts;
795
796 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
797 err = ovs_nla_put_actions(sf_acts->actions,
798 sf_acts->actions_len, skb);
799
800 if (!err)
801 nla_nest_end(skb, start);
802 else {
803 if (skb_orig_len)
804 return err;
805
806 nla_nest_cancel(skb, start);
807 }
808 } else if (skb_orig_len) {
809 return -EMSGSIZE;
810 }
811
812 return 0;
813 }
814
815 /* Called with ovs_mutex or RCU read lock. */
816 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
817 struct sk_buff *skb, u32 portid,
818 u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
819 {
820 const int skb_orig_len = skb->len;
821 struct ovs_header *ovs_header;
822 int err;
823
824 ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
825 flags, cmd);
826 if (!ovs_header)
827 return -EMSGSIZE;
828
829 ovs_header->dp_ifindex = dp_ifindex;
830
831 err = ovs_nla_put_identifier(flow, skb);
832 if (err)
833 goto error;
834
835 if (should_fill_key(&flow->id, ufid_flags)) {
836 err = ovs_nla_put_masked_key(flow, skb);
837 if (err)
838 goto error;
839 }
840
841 if (should_fill_mask(ufid_flags)) {
842 err = ovs_nla_put_mask(flow, skb);
843 if (err)
844 goto error;
845 }
846
847 err = ovs_flow_cmd_fill_stats(flow, skb);
848 if (err)
849 goto error;
850
851 if (should_fill_actions(ufid_flags)) {
852 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
853 if (err)
854 goto error;
855 }
856
857 genlmsg_end(skb, ovs_header);
858 return 0;
859
860 error:
861 genlmsg_cancel(skb, ovs_header);
862 return err;
863 }
864
865 /* May not be called with RCU read lock. */
866 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
867 const struct sw_flow_id *sfid,
868 struct genl_info *info,
869 bool always,
870 uint32_t ufid_flags)
871 {
872 struct sk_buff *skb;
873 size_t len;
874
875 if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
876 GROUP_ID(&ovs_dp_flow_multicast_group)))
877 return NULL;
878
879 len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
880 skb = genlmsg_new(len, GFP_KERNEL);
881 if (!skb)
882 return ERR_PTR(-ENOMEM);
883
884 return skb;
885 }
886
887 /* Called with ovs_mutex. */
888 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
889 int dp_ifindex,
890 struct genl_info *info, u8 cmd,
891 bool always, u32 ufid_flags)
892 {
893 struct sk_buff *skb;
894 int retval;
895
896 skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
897 &flow->id, info, always, ufid_flags);
898 if (IS_ERR_OR_NULL(skb))
899 return skb;
900
901 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
902 info->snd_portid, info->snd_seq, 0,
903 cmd, ufid_flags);
904 BUG_ON(retval < 0);
905 return skb;
906 }
907
908 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
909 {
910 struct net *net = sock_net(skb->sk);
911 struct nlattr **a = info->attrs;
912 struct ovs_header *ovs_header = info->userhdr;
913 struct sw_flow *flow = NULL, *new_flow;
914 struct sw_flow_mask mask;
915 struct sk_buff *reply;
916 struct datapath *dp;
917 struct sw_flow_actions *acts;
918 struct sw_flow_match match;
919 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
920 int error;
921 bool log = !a[OVS_FLOW_ATTR_PROBE];
922
923 /* Must have key and actions. */
924 error = -EINVAL;
925 if (!a[OVS_FLOW_ATTR_KEY]) {
926 OVS_NLERR(log, "Flow key attr not present in new flow.");
927 goto error;
928 }
929 if (!a[OVS_FLOW_ATTR_ACTIONS]) {
930 OVS_NLERR(log, "Flow actions attr not present in new flow.");
931 goto error;
932 }
933
934 /* Most of the time we need to allocate a new flow, so do it before
935 * locking.
936 */
937 new_flow = ovs_flow_alloc();
938 if (IS_ERR(new_flow)) {
939 error = PTR_ERR(new_flow);
940 goto error;
941 }
942
943 /* Extract key. */
944 ovs_match_init(&match, &new_flow->key, false, &mask);
945 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
946 a[OVS_FLOW_ATTR_MASK], log);
947 if (error)
948 goto err_kfree_flow;
949
950 /* Extract flow identifier. */
951 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
952 &new_flow->key, log);
953 if (error)
954 goto err_kfree_flow;
955
956 /* The unmasked key is needed for matching when a UFID is not used. */
957 if (ovs_identifier_is_key(&new_flow->id))
958 match.key = new_flow->id.unmasked_key;
959
960 ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
961
962 /* Validate actions. */
963 error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
964 &new_flow->key, &acts, log);
965 if (error) {
966 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
967 goto err_kfree_flow;
968 }
969
970 reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
971 ufid_flags);
972 if (IS_ERR(reply)) {
973 error = PTR_ERR(reply);
974 goto err_kfree_acts;
975 }
976
977 ovs_lock();
978 dp = get_dp(net, ovs_header->dp_ifindex);
979 if (unlikely(!dp)) {
980 error = -ENODEV;
981 goto err_unlock_ovs;
982 }
983
984 /* Check if this is a duplicate flow */
985 if (ovs_identifier_is_ufid(&new_flow->id))
986 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
987 if (!flow)
988 flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
989 if (likely(!flow)) {
990 rcu_assign_pointer(new_flow->sf_acts, acts);
991
992 /* Put flow in bucket. */
993 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
994 if (unlikely(error)) {
995 acts = NULL;
996 goto err_unlock_ovs;
997 }
998
999 if (unlikely(reply)) {
1000 error = ovs_flow_cmd_fill_info(new_flow,
1001 ovs_header->dp_ifindex,
1002 reply, info->snd_portid,
1003 info->snd_seq, 0,
1004 OVS_FLOW_CMD_NEW,
1005 ufid_flags);
1006 BUG_ON(error < 0);
1007 }
1008 ovs_unlock();
1009 } else {
1010 struct sw_flow_actions *old_acts;
1011
1012 /* Bail out if we're not allowed to modify an existing flow.
1013 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1014 * because Generic Netlink treats the latter as a dump
1015 * request. We also accept NLM_F_EXCL in case that bug ever
1016 * gets fixed.
1017 */
1018 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1019 | NLM_F_EXCL))) {
1020 error = -EEXIST;
1021 goto err_unlock_ovs;
1022 }
1023 /* The flow identifier has to be the same for flow updates.
1024 * Look for any overlapping flow.
1025 */
1026 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1027 if (ovs_identifier_is_key(&flow->id))
1028 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1029 &match);
1030 else /* UFID matches but key is different */
1031 flow = NULL;
1032 if (!flow) {
1033 error = -ENOENT;
1034 goto err_unlock_ovs;
1035 }
1036 }
1037 /* Update actions. */
1038 old_acts = ovsl_dereference(flow->sf_acts);
1039 rcu_assign_pointer(flow->sf_acts, acts);
1040
1041 if (unlikely(reply)) {
1042 error = ovs_flow_cmd_fill_info(flow,
1043 ovs_header->dp_ifindex,
1044 reply, info->snd_portid,
1045 info->snd_seq, 0,
1046 OVS_FLOW_CMD_NEW,
1047 ufid_flags);
1048 BUG_ON(error < 0);
1049 }
1050 ovs_unlock();
1051
1052 ovs_nla_free_flow_actions_rcu(old_acts);
1053 ovs_flow_free(new_flow, false);
1054 }
1055
1056 if (reply)
1057 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
1058 return 0;
1059
1060 err_unlock_ovs:
1061 ovs_unlock();
1062 kfree_skb(reply);
1063 err_kfree_acts:
1064 ovs_nla_free_flow_actions(acts);
1065 err_kfree_flow:
1066 ovs_flow_free(new_flow, false);
1067 error:
1068 return error;
1069 }
1070
1071 /* Factor out action copy to avoid "-Wframe-larger-than=1024" warning. */
1072 static struct sw_flow_actions *get_flow_actions(struct net *net,
1073 const struct nlattr *a,
1074 const struct sw_flow_key *key,
1075 const struct sw_flow_mask *mask,
1076 bool log)
1077 {
1078 struct sw_flow_actions *acts;
1079 struct sw_flow_key masked_key;
1080 int error;
1081
1082 ovs_flow_mask_key(&masked_key, key, true, mask);
1083 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1084 if (error) {
1085 OVS_NLERR(log,
1086 "Actions may not be safe on all matching packets");
1087 return ERR_PTR(error);
1088 }
1089
1090 return acts;
1091 }
1092
1093 /* Factor out match-init and action-copy to avoid the
1094 * "-Wframe-larger-than=1024" warning. Because the mask is only
1095 * used to get the actions, we use a separate function to save
1096 * some stack space.
1097 *
1098 * If neither the key nor the action attrs are present, we return 0
1099 * directly; in that case the caller will not use the match either.
1100 * If the action attr is present, we try to get the actions and
1101 * save them to *acts.
1102 *
1103 * Before returning, we reset the match->mask pointer, because we
1104 * must not return a match object with a dangling reference to the mask.
1105 */
1106 static int ovs_nla_init_match_and_action(struct net *net,
1107 struct sw_flow_match *match,
1108 struct sw_flow_key *key,
1109 struct nlattr **a,
1110 struct sw_flow_actions **acts,
1111 bool log)
1112 {
1113 struct sw_flow_mask mask;
1114 int error = 0;
1115
1116 if (a[OVS_FLOW_ATTR_KEY]) {
1117 ovs_match_init(match, key, true, &mask);
1118 error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
1119 a[OVS_FLOW_ATTR_MASK], log);
1120 if (error)
1121 goto error;
1122 }
1123
1124 if (a[OVS_FLOW_ATTR_ACTIONS]) {
1125 if (!a[OVS_FLOW_ATTR_KEY]) {
1126 OVS_NLERR(log,
1127 "Flow key attribute not present in set flow.");
1128 error = -EINVAL;
1129 goto error;
1130 }
1131
1132 *acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
1133 &mask, log);
1134 if (IS_ERR(*acts)) {
1135 error = PTR_ERR(*acts);
1136 goto error;
1137 }
1138 }
1139
1140 /* On success, error is 0. */
1141 error:
1142 match->mask = NULL;
1143 return error;
1144 }
1145
1146 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1147 {
1148 struct net *net = sock_net(skb->sk);
1149 struct nlattr **a = info->attrs;
1150 struct ovs_header *ovs_header = info->userhdr;
1151 struct sw_flow_key key;
1152 struct sw_flow *flow;
1153 struct sk_buff *reply = NULL;
1154 struct datapath *dp;
1155 struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1156 struct sw_flow_match match;
1157 struct sw_flow_id sfid;
1158 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1159 int error = 0;
1160 bool log = !a[OVS_FLOW_ATTR_PROBE];
1161 bool ufid_present;
1162
1163 ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1164 if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
1165 OVS_NLERR(log,
1166 "Flow set message rejected, Key attribute missing.");
1167 return -EINVAL;
1168 }
1169
1170 error = ovs_nla_init_match_and_action(net, &match, &key, a,
1171 &acts, log);
1172 if (error)
1173 goto error;
1174
1175 if (acts) {
1176 /* Can allocate before locking if we have acts. */
1177 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1178 ufid_flags);
1179 if (IS_ERR(reply)) {
1180 error = PTR_ERR(reply);
1181 goto err_kfree_acts;
1182 }
1183 }
1184
1185 ovs_lock();
1186 dp = get_dp(net, ovs_header->dp_ifindex);
1187 if (unlikely(!dp)) {
1188 error = -ENODEV;
1189 goto err_unlock_ovs;
1190 }
1191 /* Check that the flow exists. */
1192 if (ufid_present)
1193 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1194 else
1195 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1196 if (unlikely(!flow)) {
1197 error = -ENOENT;
1198 goto err_unlock_ovs;
1199 }
1200
1201 /* Update actions, if present. */
1202 if (likely(acts)) {
1203 old_acts = ovsl_dereference(flow->sf_acts);
1204 rcu_assign_pointer(flow->sf_acts, acts);
1205
1206 if (unlikely(reply)) {
1207 error = ovs_flow_cmd_fill_info(flow,
1208 ovs_header->dp_ifindex,
1209 reply, info->snd_portid,
1210 info->snd_seq, 0,
1211 OVS_FLOW_CMD_SET,
1212 ufid_flags);
1213 BUG_ON(error < 0);
1214 }
1215 } else {
1216 /* Could not alloc without acts before locking. */
1217 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1218 info, OVS_FLOW_CMD_SET, false,
1219 ufid_flags);
1220
1221 if (unlikely(IS_ERR(reply))) {
1222 error = PTR_ERR(reply);
1223 goto err_unlock_ovs;
1224 }
1225 }
1226
1227 /* Clear stats. */
1228 if (a[OVS_FLOW_ATTR_CLEAR])
1229 ovs_flow_stats_clear(flow);
1230 ovs_unlock();
1231
1232 if (reply)
1233 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
1234 if (old_acts)
1235 ovs_nla_free_flow_actions_rcu(old_acts);
1236
1237 return 0;
1238
1239 err_unlock_ovs:
1240 ovs_unlock();
1241 kfree_skb(reply);
1242 err_kfree_acts:
1243 ovs_nla_free_flow_actions(acts);
1244 error:
1245 return error;
1246 }
1247
1248 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1249 {
1250 struct nlattr **a = info->attrs;
1251 struct ovs_header *ovs_header = info->userhdr;
1252 struct net *net = sock_net(skb->sk);
1253 struct sw_flow_key key;
1254 struct sk_buff *reply;
1255 struct sw_flow *flow;
1256 struct datapath *dp;
1257 struct sw_flow_match match;
1258 struct sw_flow_id ufid;
1259 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1260 int err = 0;
1261 bool log = !a[OVS_FLOW_ATTR_PROBE];
1262 bool ufid_present;
1263
1264 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1265 if (a[OVS_FLOW_ATTR_KEY]) {
1266 ovs_match_init(&match, &key, true, NULL);
1267 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1268 log);
1269 } else if (!ufid_present) {
1270 OVS_NLERR(log,
1271 "Flow get message rejected, Key attribute missing.");
1272 err = -EINVAL;
1273 }
1274 if (err)
1275 return err;
1276
1277 ovs_lock();
1278 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1279 if (!dp) {
1280 err = -ENODEV;
1281 goto unlock;
1282 }
1283
1284 if (ufid_present)
1285 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1286 else
1287 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1288 if (!flow) {
1289 err = -ENOENT;
1290 goto unlock;
1291 }
1292
1293 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1294 OVS_FLOW_CMD_GET, true, ufid_flags);
1295 if (IS_ERR(reply)) {
1296 err = PTR_ERR(reply);
1297 goto unlock;
1298 }
1299
1300 ovs_unlock();
1301 return genlmsg_reply(reply, info);
1302 unlock:
1303 ovs_unlock();
1304 return err;
1305 }
1306
1307 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1308 {
1309 struct nlattr **a = info->attrs;
1310 struct ovs_header *ovs_header = info->userhdr;
1311 struct net *net = sock_net(skb->sk);
1312 struct sw_flow_key key;
1313 struct sk_buff *reply;
1314 struct sw_flow *flow = NULL;
1315 struct datapath *dp;
1316 struct sw_flow_match match;
1317 struct sw_flow_id ufid;
1318 u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1319 int err;
1320 bool log = !a[OVS_FLOW_ATTR_PROBE];
1321 bool ufid_present;
1322
1323 ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1324 if (a[OVS_FLOW_ATTR_KEY]) {
1325 ovs_match_init(&match, &key, true, NULL);
1326 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1327 NULL, log);
1328 if (unlikely(err))
1329 return err;
1330 }
1331
1332 ovs_lock();
1333 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1334 if (unlikely(!dp)) {
1335 err = -ENODEV;
1336 goto unlock;
1337 }
1338
1339 if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1340 err = ovs_flow_tbl_flush(&dp->table);
1341 goto unlock;
1342 }
1343
1344 if (ufid_present)
1345 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1346 else
1347 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1348 if (unlikely(!flow)) {
1349 err = -ENOENT;
1350 goto unlock;
1351 }
1352
1353 ovs_flow_tbl_remove(&dp->table, flow);
1354 ovs_unlock();
1355
1356 reply = ovs_flow_cmd_alloc_info(rcu_dereference_raw(flow->sf_acts),
1357 &flow->id, info, false, ufid_flags);
1358
1359 if (likely(reply)) {
1360 if (likely(!IS_ERR(reply))) {
1361 rcu_read_lock(); /* To keep RCU checker happy. */
1362 err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1363 reply, info->snd_portid,
1364 info->snd_seq, 0,
1365 OVS_FLOW_CMD_DEL,
1366 ufid_flags);
1367 rcu_read_unlock();
1368 BUG_ON(err < 0);
1369 ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
1370 } else {
1371 genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
1372 GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
1373
1374 }
1375 }
1376
1377 ovs_flow_free(flow, true);
1378 return 0;
1379 unlock:
1380 ovs_unlock();
1381 return err;
1382 }
1383
1384 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1385 {
1386 struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1387 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1388 struct table_instance *ti;
1389 struct datapath *dp;
1390 u32 ufid_flags;
1391 int err;
1392
1393 err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
1394 OVS_FLOW_ATTR_MAX, flow_policy, NULL);
1395 if (err)
1396 return err;
1397 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1398
1399 rcu_read_lock();
1400 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1401 if (!dp) {
1402 rcu_read_unlock();
1403 return -ENODEV;
1404 }
1405
1406 ti = rcu_dereference(dp->table.ti);
1407 for (;;) {
1408 struct sw_flow *flow;
1409 u32 bucket, obj;
1410
1411 bucket = cb->args[0];
1412 obj = cb->args[1];
1413 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1414 if (!flow)
1415 break;
1416
1417 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1418 NETLINK_CB(cb->skb).portid,
1419 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1420 OVS_FLOW_CMD_GET, ufid_flags) < 0)
1421 break;
1422
1423 cb->args[0] = bucket;
1424 cb->args[1] = obj;
1425 }
1426 rcu_read_unlock();
1427 return skb->len;
1428 }
1429
1430 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1431 [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1432 [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1433 [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1434 [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1435 [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1436 [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1437 [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1438 };
1439
1440 static struct genl_ops dp_flow_genl_ops[] = {
1441 { .cmd = OVS_FLOW_CMD_NEW,
1442 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1443 .policy = flow_policy,
1444 .doit = ovs_flow_cmd_new
1445 },
1446 { .cmd = OVS_FLOW_CMD_DEL,
1447 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1448 .policy = flow_policy,
1449 .doit = ovs_flow_cmd_del
1450 },
1451 { .cmd = OVS_FLOW_CMD_GET,
1452 .flags = 0, /* OK for unprivileged users. */
1453 .policy = flow_policy,
1454 .doit = ovs_flow_cmd_get,
1455 .dumpit = ovs_flow_cmd_dump
1456 },
1457 { .cmd = OVS_FLOW_CMD_SET,
1458 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1459 .policy = flow_policy,
1460 .doit = ovs_flow_cmd_set,
1461 },
1462 };
1463
1464 static struct genl_family dp_flow_genl_family __ro_after_init = {
1465 .hdrsize = sizeof(struct ovs_header),
1466 .name = OVS_FLOW_FAMILY,
1467 .version = OVS_FLOW_VERSION,
1468 .maxattr = OVS_FLOW_ATTR_MAX,
1469 .netnsok = true,
1470 .parallel_ops = true,
1471 .ops = dp_flow_genl_ops,
1472 .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1473 .mcgrps = &ovs_dp_flow_multicast_group,
1474 .n_mcgrps = 1,
1475 .module = THIS_MODULE,
1476 };
1477
1478 static size_t ovs_dp_cmd_msg_size(void)
1479 {
1480 size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1481
1482 msgsize += nla_total_size(IFNAMSIZ);
1483 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1484 msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1485 msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1486
1487 return msgsize;
1488 }
1489
1490 /* Called with ovs_mutex. */
1491 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1492 u32 portid, u32 seq, u32 flags, u8 cmd)
1493 {
1494 struct ovs_header *ovs_header;
1495 struct ovs_dp_stats dp_stats;
1496 struct ovs_dp_megaflow_stats dp_megaflow_stats;
1497 int err;
1498
1499 ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1500 flags, cmd);
1501 if (!ovs_header)
1502 goto error;
1503
1504 ovs_header->dp_ifindex = get_dpifindex(dp);
1505
1506 err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1507 if (err)
1508 goto nla_put_failure;
1509
1510 get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1511 if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1512 &dp_stats, OVS_DP_ATTR_PAD))
1513 goto nla_put_failure;
1514
1515 if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1516 sizeof(struct ovs_dp_megaflow_stats),
1517 &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1518 goto nla_put_failure;
1519
1520 if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1521 goto nla_put_failure;
1522
1523 genlmsg_end(skb, ovs_header);
1524 return 0;
1525
1526 nla_put_failure:
1527 genlmsg_cancel(skb, ovs_header);
1528 error:
1529 return -EMSGSIZE;
1530 }
1531
1532 static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1533 {
1534 return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1535 }
1536
1537 /* Called with rcu_read_lock or ovs_mutex. */
1538 static struct datapath *lookup_datapath(struct net *net,
1539 const struct ovs_header *ovs_header,
1540 struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1541 {
1542 struct datapath *dp;
1543
1544 if (!a[OVS_DP_ATTR_NAME])
1545 dp = get_dp(net, ovs_header->dp_ifindex);
1546 else {
1547 struct vport *vport;
1548
1549 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1550 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1551 }
1552 return dp ? dp : ERR_PTR(-ENODEV);
1553 }
1554
1555 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1556 {
1557 struct datapath *dp;
1558
1559 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1560 if (IS_ERR(dp))
1561 return;
1562
1563 WARN(dp->user_features, "Dropping previously announced user features\n");
1564 dp->user_features = 0;
1565 }
1566
1567 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1568 {
1569 if (a[OVS_DP_ATTR_USER_FEATURES])
1570 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1571 }
1572
1573 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1574 {
1575 struct nlattr **a = info->attrs;
1576 struct vport_parms parms;
1577 struct sk_buff *reply;
1578 struct datapath *dp;
1579 struct vport *vport;
1580 struct ovs_net *ovs_net;
1581 int err, i;
1582
1583 err = -EINVAL;
1584 if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1585 goto err;
1586
1587 reply = ovs_dp_cmd_alloc_info();
1588 if (!reply)
1589 return -ENOMEM;
1590
1591 err = -ENOMEM;
1592 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1593 if (dp == NULL)
1594 goto err_free_reply;
1595
1596 ovs_dp_set_net(dp, sock_net(skb->sk));
1597
1598 /* Allocate table. */
1599 err = ovs_flow_tbl_init(&dp->table);
1600 if (err)
1601 goto err_free_dp;
1602
1603 dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1604 if (!dp->stats_percpu) {
1605 err = -ENOMEM;
1606 goto err_destroy_table;
1607 }
1608
1609 dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1610 sizeof(struct hlist_head),
1611 GFP_KERNEL);
1612 if (!dp->ports) {
1613 err = -ENOMEM;
1614 goto err_destroy_percpu;
1615 }
1616
1617 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1618 INIT_HLIST_HEAD(&dp->ports[i]);
1619
1620 err = ovs_meters_init(dp);
1621 if (err)
1622 goto err_destroy_ports_array;
1623
1624 /* Set up our datapath device. */
1625 parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1626 parms.type = OVS_VPORT_TYPE_INTERNAL;
1627 parms.options = NULL;
1628 parms.dp = dp;
1629 parms.port_no = OVSP_LOCAL;
1630 parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1631
1632 ovs_dp_change(dp, a);
1633
1634 /* So far only local changes have been made; now we need the lock. */
1635 ovs_lock();
1636
1637 vport = new_vport(&parms);
1638 if (IS_ERR(vport)) {
1639 err = PTR_ERR(vport);
1640 if (err == -EBUSY)
1641 err = -EEXIST;
1642
1643 if (err == -EEXIST) {
1644 /* An outdated user space instance that does not understand
1645 * the concept of user_features has attempted to create a new
1646 * datapath and is likely to reuse it. Drop all user features.
1647 */
1648 if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1649 ovs_dp_reset_user_features(skb, info);
1650 }
1651
1652 goto err_destroy_meters;
1653 }
1654
1655 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1656 info->snd_seq, 0, OVS_DP_CMD_NEW);
1657 BUG_ON(err < 0);
1658
1659 ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1660 list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1661
1662 ovs_unlock();
1663
1664 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1665 return 0;
1666
1667 err_destroy_meters:
1668 ovs_unlock();
1669 ovs_meters_exit(dp);
1670 err_destroy_ports_array:
1671 kfree(dp->ports);
1672 err_destroy_percpu:
1673 free_percpu(dp->stats_percpu);
1674 err_destroy_table:
1675 ovs_flow_tbl_destroy(&dp->table);
1676 err_free_dp:
1677 kfree(dp);
1678 err_free_reply:
1679 kfree_skb(reply);
1680 err:
1681 return err;
1682 }
1683
1684 /* Called with ovs_mutex. */
1685 static void __dp_destroy(struct datapath *dp)
1686 {
1687 int i;
1688
1689 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1690 struct vport *vport;
1691 struct hlist_node *n;
1692
1693 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1694 if (vport->port_no != OVSP_LOCAL)
1695 ovs_dp_detach_port(vport);
1696 }
1697
1698 list_del_rcu(&dp->list_node);
1699
1700 /* OVSP_LOCAL is the datapath's internal port. We need to make sure that
1701 * all ports in the datapath are destroyed before the datapath itself is freed.
1702 */
1703 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1704
1705 /* RCU destroy the flow table */
1706 call_rcu(&dp->rcu, destroy_dp_rcu);
1707 }
1708
1709 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1710 {
1711 struct sk_buff *reply;
1712 struct datapath *dp;
1713 int err;
1714
1715 reply = ovs_dp_cmd_alloc_info();
1716 if (!reply)
1717 return -ENOMEM;
1718
1719 ovs_lock();
1720 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1721 err = PTR_ERR(dp);
1722 if (IS_ERR(dp))
1723 goto err_unlock_free;
1724
1725 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1726 info->snd_seq, 0, OVS_DP_CMD_DEL);
1727 BUG_ON(err < 0);
1728
1729 __dp_destroy(dp);
1730 ovs_unlock();
1731
1732 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1733 return 0;
1734
1735 err_unlock_free:
1736 ovs_unlock();
1737 kfree_skb(reply);
1738 return err;
1739 }
1740
1741 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1742 {
1743 struct sk_buff *reply;
1744 struct datapath *dp;
1745 int err;
1746
1747 reply = ovs_dp_cmd_alloc_info();
1748 if (!reply)
1749 return -ENOMEM;
1750
1751 ovs_lock();
1752 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1753 err = PTR_ERR(dp);
1754 if (IS_ERR(dp))
1755 goto err_unlock_free;
1756
1757 ovs_dp_change(dp, info->attrs);
1758
1759 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1760 info->snd_seq, 0, OVS_DP_CMD_GET);
1761 BUG_ON(err < 0);
1762
1763 ovs_unlock();
1764
1765 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1766 return 0;
1767
1768 err_unlock_free:
1769 ovs_unlock();
1770 kfree_skb(reply);
1771 return err;
1772 }
1773
1774 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1775 {
1776 struct sk_buff *reply;
1777 struct datapath *dp;
1778 int err;
1779
1780 reply = ovs_dp_cmd_alloc_info();
1781 if (!reply)
1782 return -ENOMEM;
1783
1784 ovs_lock();
1785 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1786 if (IS_ERR(dp)) {
1787 err = PTR_ERR(dp);
1788 goto err_unlock_free;
1789 }
1790 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1791 info->snd_seq, 0, OVS_DP_CMD_GET);
1792 BUG_ON(err < 0);
1793 ovs_unlock();
1794
1795 return genlmsg_reply(reply, info);
1796
1797 err_unlock_free:
1798 ovs_unlock();
1799 kfree_skb(reply);
1800 return err;
1801 }
1802
1803 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1804 {
1805 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1806 struct datapath *dp;
1807 int skip = cb->args[0];
1808 int i = 0;
1809
1810 ovs_lock();
1811 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1812 if (i >= skip &&
1813 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1814 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1815 OVS_DP_CMD_GET) < 0)
1816 break;
1817 i++;
1818 }
1819 ovs_unlock();
1820
1821 cb->args[0] = i;
1822
1823 return skb->len;
1824 }
1825
1826 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1827 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1828 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1829 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1830 };
1831
1832 static struct genl_ops dp_datapath_genl_ops[] = {
1833 { .cmd = OVS_DP_CMD_NEW,
1834 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1835 .policy = datapath_policy,
1836 .doit = ovs_dp_cmd_new
1837 },
1838 { .cmd = OVS_DP_CMD_DEL,
1839 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1840 .policy = datapath_policy,
1841 .doit = ovs_dp_cmd_del
1842 },
1843 { .cmd = OVS_DP_CMD_GET,
1844 .flags = 0, /* OK for unprivileged users. */
1845 .policy = datapath_policy,
1846 .doit = ovs_dp_cmd_get,
1847 .dumpit = ovs_dp_cmd_dump
1848 },
1849 { .cmd = OVS_DP_CMD_SET,
1850 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1851 .policy = datapath_policy,
1852 .doit = ovs_dp_cmd_set,
1853 },
1854 };
1855
1856 static struct genl_family dp_datapath_genl_family __ro_after_init = {
1857 .hdrsize = sizeof(struct ovs_header),
1858 .name = OVS_DATAPATH_FAMILY,
1859 .version = OVS_DATAPATH_VERSION,
1860 .maxattr = OVS_DP_ATTR_MAX,
1861 .netnsok = true,
1862 .parallel_ops = true,
1863 .ops = dp_datapath_genl_ops,
1864 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1865 .mcgrps = &ovs_dp_datapath_multicast_group,
1866 .n_mcgrps = 1,
1867 .module = THIS_MODULE,
1868 };
1869
1870 /* Called with ovs_mutex or RCU read lock. */
1871 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1872 struct net *net, u32 portid, u32 seq,
1873 u32 flags, u8 cmd)
1874 {
1875 struct ovs_header *ovs_header;
1876 struct ovs_vport_stats vport_stats;
1877 int err;
1878
1879 ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1880 flags, cmd);
1881 if (!ovs_header)
1882 return -EMSGSIZE;
1883
1884 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1885
1886 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1887 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1888 nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1889 ovs_vport_name(vport)) ||
1890 nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
1891 goto nla_put_failure;
1892
1893 #ifdef HAVE_PEERNET2ID_ALLOC
1894 if (!net_eq(net, dev_net(vport->dev))) {
1895 int id = peernet2id_alloc(net, dev_net(vport->dev));
1896
1897 if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
1898 goto nla_put_failure;
1899 }
1900
1901 #endif
1902 ovs_vport_get_stats(vport, &vport_stats);
1903 if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1904 sizeof(struct ovs_vport_stats), &vport_stats,
1905 OVS_VPORT_ATTR_PAD))
1906 goto nla_put_failure;
1907
1908 if (ovs_vport_get_upcall_portids(vport, skb))
1909 goto nla_put_failure;
1910
1911 err = ovs_vport_get_options(vport, skb);
1912 if (err == -EMSGSIZE)
1913 goto error;
1914
1915 genlmsg_end(skb, ovs_header);
1916 return 0;
1917
1918 nla_put_failure:
1919 err = -EMSGSIZE;
1920 error:
1921 genlmsg_cancel(skb, ovs_header);
1922 return err;
1923 }
1924
1925 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1926 {
1927 return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1928 }
1929
1930 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1931 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
1932 u32 portid, u32 seq, u8 cmd)
1933 {
1934 struct sk_buff *skb;
1935 int retval;
1936
1937 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1938 if (!skb)
1939 return ERR_PTR(-ENOMEM);
1940
1941 retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
1942 BUG_ON(retval < 0);
1943
1944 return skb;
1945 }
1946
1947 /* Called with ovs_mutex or RCU read lock. */
1948 static struct vport *lookup_vport(struct net *net,
1949 const struct ovs_header *ovs_header,
1950 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1951 {
1952 struct datapath *dp;
1953 struct vport *vport;
1954
1955 if (a[OVS_VPORT_ATTR_IFINDEX])
1956 return ERR_PTR(-EOPNOTSUPP);
1957 if (a[OVS_VPORT_ATTR_NAME]) {
1958 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1959 if (!vport)
1960 return ERR_PTR(-ENODEV);
1961 if (ovs_header->dp_ifindex &&
1962 ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1963 return ERR_PTR(-ENODEV);
1964 return vport;
1965 } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1966 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1967
1968 if (port_no >= DP_MAX_PORTS)
1969 return ERR_PTR(-EFBIG);
1970
1971 dp = get_dp(net, ovs_header->dp_ifindex);
1972 if (!dp)
1973 return ERR_PTR(-ENODEV);
1974
1975 vport = ovs_vport_ovsl_rcu(dp, port_no);
1976 if (!vport)
1977 return ERR_PTR(-ENODEV);
1978 return vport;
1979 } else
1980 return ERR_PTR(-EINVAL);
1981
1982 }
1983
1984 /* Called with ovs_mutex */
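/* Recompute dp->max_headroom as the maximum forwarding headroom of all
 * attached vports and push it to every vport via netdev_set_rx_headroom(),
 * so that skbs forwarded between ports are likely to already have enough
 * headroom (e.g. for tunnel encapsulation) without being reallocated.
 */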
1985 static void update_headroom(struct datapath *dp)
1986 {
1987 unsigned dev_headroom, max_headroom = 0;
1988 struct net_device *dev;
1989 struct vport *vport;
1990 int i;
1991
1992 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1993 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1994 dev = vport->dev;
1995 dev_headroom = netdev_get_fwd_headroom(dev);
1996 if (dev_headroom > max_headroom)
1997 max_headroom = dev_headroom;
1998 }
1999 }
2000
2001 dp->max_headroom = max_headroom;
2002 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
2003 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
2004 netdev_set_rx_headroom(vport->dev, max_headroom);
2005 }
2006
2007 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
2008 {
2009 struct nlattr **a = info->attrs;
2010 struct ovs_header *ovs_header = info->userhdr;
2011 struct vport_parms parms;
2012 struct sk_buff *reply;
2013 struct vport *vport;
2014 struct datapath *dp;
2015 u32 port_no;
2016 int err;
2017
2018 if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
2019 !a[OVS_VPORT_ATTR_UPCALL_PID])
2020 return -EINVAL;
2021 if (a[OVS_VPORT_ATTR_IFINDEX])
2022 return -EOPNOTSUPP;
2023
2024 port_no = a[OVS_VPORT_ATTR_PORT_NO]
2025 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
2026 if (port_no >= DP_MAX_PORTS)
2027 return -EFBIG;
2028
2029 reply = ovs_vport_cmd_alloc_info();
2030 if (!reply)
2031 return -ENOMEM;
2032
2033 ovs_lock();
2034 restart:
2035 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2036 err = -ENODEV;
2037 if (!dp)
2038 goto exit_unlock_free;
2039
2040 if (port_no) {
2041 vport = ovs_vport_ovsl(dp, port_no);
2042 err = -EBUSY;
2043 if (vport)
2044 goto exit_unlock_free;
2045 } else {
2046 for (port_no = 1; ; port_no++) {
2047 if (port_no >= DP_MAX_PORTS) {
2048 err = -EFBIG;
2049 goto exit_unlock_free;
2050 }
2051 vport = ovs_vport_ovsl(dp, port_no);
2052 if (!vport)
2053 break;
2054 }
2055 }
2056
2057 parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2058 parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2059 parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2060 parms.dp = dp;
2061 parms.port_no = port_no;
2062 parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
2063
2064 vport = new_vport(&parms);
2065 err = PTR_ERR(vport);
2066 if (IS_ERR(vport)) {
2067 if (err == -EAGAIN)
2068 goto restart;
2069 goto exit_unlock_free;
2070 }
2071
2072 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2073 info->snd_portid, info->snd_seq, 0,
2074 OVS_VPORT_CMD_NEW);
2075 BUG_ON(err < 0);
2076
2077 if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
2078 update_headroom(dp);
2079 else
2080 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2081
2082 ovs_unlock();
2083
2084 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
2085 return 0;
2086
2087 exit_unlock_free:
2088 ovs_unlock();
2089 kfree_skb(reply);
2090 return err;
2091 }
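/* In ovs_vport_cmd_new(), an absent or zero OVS_VPORT_ATTR_PORT_NO requests
 * automatic allocation: the first free port number starting from 1 is used
 * (port 0 is OVSP_LOCAL).  If new_vport() returns -EAGAIN, e.g. after a
 * vport module has been loaded on demand, the whole lookup is retried from
 * the "restart" label.
 */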
2092
2093 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2094 {
2095 struct nlattr **a = info->attrs;
2096 struct sk_buff *reply;
2097 struct vport *vport;
2098 int err;
2099
2100 reply = ovs_vport_cmd_alloc_info();
2101 if (!reply)
2102 return -ENOMEM;
2103
2104 ovs_lock();
2105 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2106 err = PTR_ERR(vport);
2107 if (IS_ERR(vport))
2108 goto exit_unlock_free;
2109
2110 if (a[OVS_VPORT_ATTR_TYPE] &&
2111 nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2112 err = -EINVAL;
2113 goto exit_unlock_free;
2114 }
2115
2116 if (a[OVS_VPORT_ATTR_OPTIONS]) {
2117 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2118 if (err)
2119 goto exit_unlock_free;
2120 }
2121
2122 if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2123 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2124
2125 err = ovs_vport_set_upcall_portids(vport, ids);
2126 if (err)
2127 goto exit_unlock_free;
2128 }
2129
2130 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2131 info->snd_portid, info->snd_seq, 0,
2132 OVS_VPORT_CMD_SET);
2133 BUG_ON(err < 0);
2134 ovs_unlock();
2135
2136 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
2137 return 0;
2138
2139 exit_unlock_free:
2140 ovs_unlock();
2141 kfree_skb(reply);
2142 return err;
2143 }
2144
2145 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2146 {
2147 bool must_update_headroom = false;
2148 struct nlattr **a = info->attrs;
2149 struct sk_buff *reply;
2150 struct datapath *dp;
2151 struct vport *vport;
2152 int err;
2153
2154 reply = ovs_vport_cmd_alloc_info();
2155 if (!reply)
2156 return -ENOMEM;
2157
2158 ovs_lock();
2159 vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2160 err = PTR_ERR(vport);
2161 if (IS_ERR(vport))
2162 goto exit_unlock_free;
2163
2164 if (vport->port_no == OVSP_LOCAL) {
2165 err = -EINVAL;
2166 goto exit_unlock_free;
2167 }
2168
2169 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2170 info->snd_portid, info->snd_seq, 0,
2171 OVS_VPORT_CMD_DEL);
2172 BUG_ON(err < 0);
2173
2174 /* The vport deletion may trigger a datapath headroom update. */
2175 dp = vport->dp;
2176 if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2177 must_update_headroom = true;
2178 netdev_reset_rx_headroom(vport->dev);
2179 ovs_dp_detach_port(vport);
2180
2181 if (must_update_headroom)
2182 update_headroom(dp);
2183
2184 ovs_unlock();
2185
2186 ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
2187 return 0;
2188
2189 exit_unlock_free:
2190 ovs_unlock();
2191 kfree_skb(reply);
2192 return err;
2193 }
2194
2195 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2196 {
2197 struct nlattr **a = info->attrs;
2198 struct ovs_header *ovs_header = info->userhdr;
2199 struct sk_buff *reply;
2200 struct vport *vport;
2201 int err;
2202
2203 reply = ovs_vport_cmd_alloc_info();
2204 if (!reply)
2205 return -ENOMEM;
2206
2207 rcu_read_lock();
2208 vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2209 err = PTR_ERR(vport);
2210 if (IS_ERR(vport))
2211 goto exit_unlock_free;
2212 err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
2213 info->snd_portid, info->snd_seq, 0,
2214 OVS_VPORT_CMD_GET);
2215 BUG_ON(err < 0);
2216 rcu_read_unlock();
2217
2218 return genlmsg_reply(reply, info);
2219
2220 exit_unlock_free:
2221 rcu_read_unlock();
2222 kfree_skb(reply);
2223 return err;
2224 }
2225
2226 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2227 {
2228 struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2229 struct datapath *dp;
2230 int bucket = cb->args[0], skip = cb->args[1];
2231 int i, j = 0;
2232
2233 rcu_read_lock();
2234 dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2235 if (!dp) {
2236 rcu_read_unlock();
2237 return -ENODEV;
2238 }
2239 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2240 struct vport *vport;
2241
2242 j = 0;
2243 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2244 if (j >= skip &&
2245 ovs_vport_cmd_fill_info(vport, skb,
2246 sock_net(skb->sk),
2247 NETLINK_CB(cb->skb).portid,
2248 cb->nlh->nlmsg_seq,
2249 NLM_F_MULTI,
2250 OVS_VPORT_CMD_GET) < 0)
2251 goto out;
2252
2253 j++;
2254 }
2255 skip = 0;
2256 }
2257 out:
2258 rcu_read_unlock();
2259
2260 cb->args[0] = i;
2261 cb->args[1] = j;
2262
2263 return skb->len;
2264 }
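/* ovs_vport_cmd_dump() keeps its position in cb->args: args[0] is the hash
 * bucket to resume from and args[1] the number of entries already emitted
 * from that bucket, so a dump that fills one skb continues where it left
 * off on the next netlink dump callback.
 */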
2265
2266 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2267 [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2268 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2269 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2270 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2271 [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2272 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2273 [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
2274 [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
2275 };
2276
2277 static struct genl_ops dp_vport_genl_ops[] = {
2278 { .cmd = OVS_VPORT_CMD_NEW,
2279 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2280 .policy = vport_policy,
2281 .doit = ovs_vport_cmd_new
2282 },
2283 { .cmd = OVS_VPORT_CMD_DEL,
2284 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2285 .policy = vport_policy,
2286 .doit = ovs_vport_cmd_del
2287 },
2288 { .cmd = OVS_VPORT_CMD_GET,
2289 .flags = 0, /* OK for unprivileged users. */
2290 .policy = vport_policy,
2291 .doit = ovs_vport_cmd_get,
2292 .dumpit = ovs_vport_cmd_dump
2293 },
2294 { .cmd = OVS_VPORT_CMD_SET,
2295 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2296 .policy = vport_policy,
2297 .doit = ovs_vport_cmd_set,
2298 },
2299 };
2300
2301 struct genl_family dp_vport_genl_family __ro_after_init = {
2302 .hdrsize = sizeof(struct ovs_header),
2303 .name = OVS_VPORT_FAMILY,
2304 .version = OVS_VPORT_VERSION,
2305 .maxattr = OVS_VPORT_ATTR_MAX,
2306 .netnsok = true,
2307 .parallel_ops = true,
2308 .ops = dp_vport_genl_ops,
2309 .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2310 .mcgrps = &ovs_dp_vport_multicast_group,
2311 .n_mcgrps = 1,
2312 .module = THIS_MODULE,
2313 };
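/* For illustration only (not part of this file): a minimal userspace sketch
 * of driving the OVS_VPORT_FAMILY interface defined above, assuming
 * libnl-genl-3 is available.  It builds an OVS_VPORT_CMD_NEW request with
 * the mandatory OVS_VPORT_ATTR_NAME, OVS_VPORT_ATTR_TYPE and
 * OVS_VPORT_ATTR_UPCALL_PID attributes checked by ovs_vport_cmd_new();
 * error handling is abbreviated.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

static int vport_add_example(int dp_ifindex, const char *name)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct ovs_header *hdr;
	struct nl_msg *msg;
	int family, err;

	if (!sk || genl_connect(sk) < 0)
		return -1;
	family = genl_ctrl_resolve(sk, OVS_VPORT_FAMILY);	/* "ovs_vport" */
	msg = nlmsg_alloc();
	hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
			  sizeof(*hdr), NLM_F_REQUEST | NLM_F_ACK,
			  OVS_VPORT_CMD_NEW, OVS_VPORT_VERSION);
	hdr->dp_ifindex = dp_ifindex;			/* target datapath */
	nla_put_string(msg, OVS_VPORT_ATTR_NAME, name);
	nla_put_u32(msg, OVS_VPORT_ATTR_TYPE, OVS_VPORT_TYPE_NETDEV);
	nla_put_u32(msg, OVS_VPORT_ATTR_UPCALL_PID, 0);	/* no upcalls */
	err = nl_send_auto(sk, msg);
	if (err >= 0)
		err = nl_wait_for_ack(sk);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}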
2314
2315 static struct genl_family *dp_genl_families[] = {
2316 &dp_datapath_genl_family,
2317 &dp_vport_genl_family,
2318 &dp_flow_genl_family,
2319 &dp_packet_genl_family,
2320 &dp_meter_genl_family,
2321 #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2322 &dp_ct_limit_genl_family,
2323 #endif
2324 };
2325
2326 static void dp_unregister_genl(int n_families)
2327 {
2328 int i;
2329
2330 for (i = 0; i < n_families; i++)
2331 genl_unregister_family(dp_genl_families[i]);
2332 }
2333
2334 static int __init dp_register_genl(void)
2335 {
2336 int err;
2337 int i;
2338
2339 for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2340
2341 err = genl_register_family(dp_genl_families[i]);
2342 if (err)
2343 goto error;
2344 }
2345
2346 return 0;
2347
2348 error:
2349 dp_unregister_genl(i);
2350 return err;
2351 }
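/* On a registration failure, dp_unregister_genl(i) tears down only the i
 * families that were successfully registered before the failing one, so
 * nothing is left half-registered.
 */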
2352
2353 static int __net_init ovs_init_net(struct net *net)
2354 {
2355 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2356
2357 INIT_LIST_HEAD(&ovs_net->dps);
2358 INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2359 ovs_netns_frags_init(net);
2360 ovs_netns_frags6_init(net);
2361 return ovs_ct_init(net);
2362 }
2363
2364 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2365 struct list_head *head)
2366 {
2367 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2368 struct datapath *dp;
2369
2370 list_for_each_entry(dp, &ovs_net->dps, list_node) {
2371 int i;
2372
2373 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2374 struct vport *vport;
2375
2376 hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2377
2378 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2379 continue;
2380
2381 if (dev_net(vport->dev) == dnet)
2382 list_add(&vport->detach_list, head);
2383 }
2384 }
2385 }
2386 }
2387
2388 static void __net_exit ovs_exit_net(struct net *dnet)
2389 {
2390 struct datapath *dp, *dp_next;
2391 struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2392 struct vport *vport, *vport_next;
2393 struct net *net;
2394 LIST_HEAD(head);
2395
2396 ovs_netns_frags6_exit(dnet);
2397 ovs_netns_frags_exit(dnet);
2398 ovs_ct_exit(dnet);
2399 ovs_lock();
2400 list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2401 __dp_destroy(dp);
2402
2403 #ifdef HAVE_NET_RWSEM
2404 down_read(&net_rwsem);
2405 #else
2406 rtnl_lock();
2407 #endif
2408 for_each_net(net)
2409 list_vports_from_net(net, dnet, &head);
2410 #ifdef HAVE_NET_RWSEM
2411 up_read(&net_rwsem);
2412 #else
2413 rtnl_unlock();
2414 #endif
2415
2416 /* Detach all vports from given namespace. */
2417 list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2418 list_del(&vport->detach_list);
2419 ovs_dp_detach_port(vport);
2420 }
2421
2422 ovs_unlock();
2423
2424 cancel_work_sync(&ovs_net->dp_notify_work);
2425 }
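/* ovs_exit_net() first destroys every datapath that lives in the dying
 * namespace, then walks the remaining namespaces to collect internal
 * vports whose underlying device was created in the dying namespace and
 * detaches them, so no datapath is left holding a device from a namespace
 * that is going away.
 */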
2426
2427 static struct pernet_operations ovs_net_ops = {
2428 .init = ovs_init_net,
2429 .exit = ovs_exit_net,
2430 .id = &ovs_net_id,
2431 .size = sizeof(struct ovs_net),
2432 };
2433
2434 static int __init dp_init(void)
2435 {
2436 int err;
2437
2438 BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2439
2440 pr_info("Open vSwitch switching datapath %s\n", VERSION);
2441
2442 ovs_nsh_init();
2443 err = action_fifos_init();
2444 if (err)
2445 goto error;
2446
2447 err = ovs_internal_dev_rtnl_link_register();
2448 if (err)
2449 goto error_action_fifos_exit;
2450
2451 err = ovs_flow_init();
2452 if (err)
2453 goto error_unreg_rtnl_link;
2454
2455 err = ovs_vport_init();
2456 if (err)
2457 goto error_flow_exit;
2458
2459 err = register_pernet_device(&ovs_net_ops);
2460 if (err)
2461 goto error_vport_exit;
2462
2463 err = compat_init();
2464 if (err)
2465 goto error_netns_exit;
2466
2467 err = register_netdevice_notifier(&ovs_dp_device_notifier);
2468 if (err)
2469 goto error_compat_exit;
2470
2471 err = ovs_netdev_init();
2472 if (err)
2473 goto error_unreg_notifier;
2474
2475 err = dp_register_genl();
2476 if (err < 0)
2477 goto error_unreg_netdev;
2478
2479 return 0;
2480
2481 error_unreg_netdev:
2482 ovs_netdev_exit();
2483 error_unreg_notifier:
2484 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2485 error_compat_exit:
2486 compat_exit();
2487 error_netns_exit:
2488 unregister_pernet_device(&ovs_net_ops);
2489 error_vport_exit:
2490 ovs_vport_exit();
2491 error_flow_exit:
2492 ovs_flow_exit();
2493 error_unreg_rtnl_link:
2494 ovs_internal_dev_rtnl_link_unregister();
2495 error_action_fifos_exit:
2496 action_fifos_exit();
2497 error:
2498 ovs_nsh_cleanup();
2499 return err;
2500 }
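/* dp_init() unwinds in strict reverse order of initialization: each error
 * label undoes exactly the steps that succeeded before the failure, and
 * dp_cleanup() below runs essentially the same sequence (plus an
 * rcu_barrier()) on module unload.
 */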
2501
2502 static void dp_cleanup(void)
2503 {
2504 dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2505 ovs_netdev_exit();
2506 unregister_netdevice_notifier(&ovs_dp_device_notifier);
2507 compat_exit();
2508 unregister_pernet_device(&ovs_net_ops);
2509 rcu_barrier();
2510 ovs_vport_exit();
2511 ovs_flow_exit();
2512 ovs_internal_dev_rtnl_link_unregister();
2513 action_fifos_exit();
2514 ovs_nsh_cleanup();
2515 }
2516
2517 module_init(dp_init);
2518 module_exit(dp_cleanup);
2519
2520 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2521 MODULE_LICENSE("GPL");
2522 MODULE_VERSION(VERSION);
2523 MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2524 MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2525 MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2526 MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
2527 MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
2528 MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);