/*
 * Copyright (c) 2007-2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/nsh.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
#include "gso.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

unsigned int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};
/* Check whether we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
 */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
		       const struct genl_multicast_group *grp,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, info, GROUP_ID(grp), GFP_KERNEL);
}
/**
 * DOC: Locking:
 *
 * All writes, whether to device state (add/remove datapath or port, set
 * operations on vports, etc.) or to other state (flow table modifications,
 * miscellaneous datapath parameters, etc.), are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization, but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
#endif

static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *,
			     uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *,
				  uint32_t cutlen);

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return ovs_vport_name(vport);
}

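/* Return the ifindex of the datapath's local (internal) port, or 0 if the
 * datapath has none. Takes the RCU read lock internally, so callers need
 * not hold it.
 */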
static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

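/* RCU callback: frees the datapath's flow table, per-CPU stats, port table,
 * meters and the datapath itself once the RCU grace period after its
 * removal has elapsed.
 */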
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp->ports);
	ovs_meters_exit(dp);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;
	int error;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
					 &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;

		memset(&upcall, 0, sizeof(upcall));
		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		upcall.mru = OVS_CB(skb)->mru;
		error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	error = ovs_execute_actions(dp, skb, sf_acts, key);
	if (unlikely(error))
		net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
				    ovs_dp_name(dp), error);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->syncp);
}

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info,
		  uint32_t cutlen)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

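/* Software-segments a GSO skb and queues each resulting segment to
 * userspace individually; all segments are freed here regardless of the
 * outcome. For UDP GSO (where supported), later fragments are re-marked
 * OVS_FRAG_TYPE_LATER in a copy of the flow key.
 */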
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info,
			     uint32_t cutlen)
{
#ifdef HAVE_SKB_GSO_UDP
	unsigned int gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
#endif
	struct sk_buff *segs, *nskb;
	struct ovs_skb_cb ovs_cb;
	int err;

	ovs_cb = *OVS_CB(skb);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	*OVS_CB(skb) = ovs_cb;
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;
#ifdef HAVE_SKB_GSO_UDP
	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}
#endif
	/* Queue all of the segments. */
	skb = segs;
	do {
		*OVS_CB(skb) = ovs_cb;
#ifdef HAVE_SKB_GSO_UDP
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;
#endif
		err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
		if (err)
			break;

	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

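/* Estimate the Netlink message size needed for an upcall, so the reply skb
 * can be allocated in one shot by genlmsg_new().
 */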
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen, int actions_attrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
		+ nla_total_size(sizeof(unsigned int)) /* OVS_PACKET_ATTR_LEN */
		+ nla_total_size(sizeof(u64)); /* OVS_PACKET_ATTR_HASH */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	/* OVS_PACKET_ATTR_ACTIONS */
	if (upcall_info->actions_len)
		size += nla_total_size(actions_attrlen);

	/* OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru)
		size += nla_total_size(sizeof(upcall_info->mru));

	return size;
}

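/* Zero-pad the message to the next NLA_ALIGNTO boundary, unless userspace
 * has declared (via OVS_DP_F_UNALIGNED) that it can cope without padding.
 */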
static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(skb->len) - skb->len;

		if (plen > 0)
			skb_put_zero(skb, plen);
	}
}

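/* Build an OVS_PACKET_CMD_* Netlink message for one packet (flow key,
 * metadata and the packet data, zero-copied where userspace allows
 * unaligned attributes) and unicast it to the upcall portid.
 */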
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info,
				  uint32_t cutlen)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;
	u64 hash;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_hwaccel_push_inside(nskb);
		if (!nskb)
			return -ENOMEM;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_csum_hwoffload_help(skb, 0)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen - cutlen,
			      OVS_CB(skb)->acts_origlen);
	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	if (!upcall) {
		err = -EINVAL;
		goto out;
	}
	upcall->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
	if (err)
		goto out;

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start_noflag(user_skb,
					    OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		if (!nla) {
			err = -EMSGSIZE;
			goto out;
		}
		err = ovs_nla_put_tunnel_info(user_skb,
					      upcall_info->egress_tun_info);
		if (err)
			goto out;

		nla_nest_end(user_skb, nla);
	}

	if (upcall_info->actions_len) {
		nla = nla_nest_start_noflag(user_skb, OVS_PACKET_ATTR_ACTIONS);
		if (!nla) {
			err = -EMSGSIZE;
			goto out;
		}
		err = ovs_nla_put_actions(upcall_info->actions,
					  upcall_info->actions_len,
					  user_skb);
		if (!err)
			nla_nest_end(user_skb, nla);
		else
			nla_nest_cancel(user_skb, nla);
	}

	/* Add OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru) {
		if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
				upcall_info->mru)) {
			err = -ENOBUFS;
			goto out;
		}
		pad_packet(dp, user_skb);
	}

	/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
	if (cutlen > 0) {
		if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
				skb->len)) {
			err = -ENOBUFS;
			goto out;
		}
		pad_packet(dp, user_skb);
	}

	/* Add OVS_PACKET_ATTR_HASH */
	hash = skb_get_hash_raw(skb);
#ifdef HAVE_SW_HASH
	if (skb->sw_hash)
		hash |= OVS_PACKET_HASH_SW_BIT;
#endif

#ifdef HAVE_L4_RXHASH
	if (skb->l4_rxhash)
#else
	if (skb->l4_hash)
#endif
		hash |= OVS_PACKET_HASH_L4_BIT;

	if (nla_put(user_skb, OVS_PACKET_ATTR_HASH, sizeof(u64), &hash)) {
		err = -ENOBUFS;
		goto out;
	}

	/* Only reserve room for attribute header, packet data is added
	 * in skb_zerocopy()
	 */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len - cutlen);

	err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	pad_packet(dp, user_skb);

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}

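/* OVS_PACKET_CMD_EXECUTE handler: reconstructs a packet supplied by
 * userspace, validates its flow key and actions, and executes the actions
 * as if the packet had arrived on the given input vport.
 */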
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct vport *input_vport;
	u16 mru = 0;
	u64 hash;
	int len;
	int err;
	bool log = !a[OVS_PACKET_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	/* Set packet's mru */
	if (a[OVS_PACKET_ATTR_MRU]) {
		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
		packet->ignore_df = 1;
	}
	OVS_CB(packet)->mru = mru;

	if (a[OVS_PACKET_ATTR_HASH]) {
		hash = nla_get_u64(a[OVS_PACKET_ATTR_HASH]);

		__skb_set_hash(packet, hash & 0xFFFFFFFFULL,
			       !!(hash & OVS_PACKET_HASH_SW_BIT),
			       !!(hash & OVS_PACKET_HASH_L4_BIT));
	}

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
					     packet, &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	packet->dev = input_vport->dev;
	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
	[OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};

static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = packet_policy,
#endif
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
#ifndef HAVE_GENL_OPS_POLICY
	.policy = packet_policy,
#endif
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_packet_genl_ops,
	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
	.module = THIS_MODULE,
};

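/* Aggregate the per-CPU datapath counters into a single snapshot, using the
 * u64_stats sequence counter to obtain a consistent view of each CPU's
 * values.
 */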
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
	return ovs_identifier_is_ufid(sfid) &&
	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{
	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

	/* OVS_FLOW_ATTR_UFID */
	if (sfid && ovs_identifier_is_ufid(sfid))
		len += nla_total_size(sfid->ufid_len);

	/* OVS_FLOW_ATTR_KEY */
	if (!sfid || should_fill_key(sfid, ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_MASK */
	if (should_fill_mask(ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_ACTIONS */
	if (should_fill_actions(ufid_flags))
		len += nla_total_size(acts->orig_len);

	return len
		+ nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
			      OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
			  sizeof(struct ovs_flow_stats), &stats,
			  OVS_FLOW_ATTR_PAD))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'. This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them. (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start_noflag(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_identifier(flow, skb);
	if (err)
		goto error;

	if (should_fill_key(&flow->id, ufid_flags)) {
		err = ovs_nla_put_masked_key(flow, skb);
		if (err)
			goto error;
	}

	if (should_fill_mask(ufid_flags)) {
		err = ovs_nla_put_mask(flow, skb);
		if (err)
			goto error;
	}

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	if (should_fill_actions(ufid_flags)) {
		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
		if (err)
			goto error;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{
	struct sk_buff *skb;
	size_t len;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
					GROUP_ID(&ovs_dp_flow_multicast_group)))
		return NULL;

	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
	skb = genlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
				      &flow->id, info, always, ufid_flags);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd, ufid_flags);
	BUG_ON(retval < 0);
	return skb;
}

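/* OVS_FLOW_CMD_NEW handler: installs a new flow in the datapath's flow
 * table or, if a matching flow already exists and the Netlink flags permit
 * it, replaces that flow's actions.
 */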
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow = NULL, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attr not present in new flow.");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR(log, "Flow actions attr not present in new flow.");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &new_flow->key, false, &mask);
	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto err_kfree_flow;

	/* Extract flow identifier. */
	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
				       &new_flow->key, log);
	if (error)
		goto err_kfree_flow;

	/* unmasked key is needed to match when ufid is not used. */
	if (ovs_identifier_is_key(&new_flow->id))
		match.key = new_flow->id.unmasked_key;

	ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);

	/* Validate actions. */
	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
				     &new_flow->key, &acts, log);
	if (error) {
		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
					ufid_flags);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}

	/* Check if this is a duplicate flow */
	if (ovs_identifier_is_ufid(&new_flow->id))
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
	if (!flow)
		flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request. We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The flow identifier has to be the same for flow updates.
		 * Look for any overlapping flow.
		 */
		if (unlikely(!ovs_flow_cmp(flow, &match))) {
			if (ovs_identifier_is_key(&flow->id))
				flow = ovs_flow_tbl_lookup_exact(&dp->table,
								 &match);
			else /* UFID matches but key is different */
				flow = NULL;
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions_rcu(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
error:
	return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
								   const struct nlattr *a,
								   const struct sw_flow_key *key,
								   const struct sw_flow_mask *mask,
								   bool log)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, true, mask);
	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
	if (error) {
		OVS_NLERR(log,
			  "Actions may not be safe on all matching packets");
		return ERR_PTR(error);
	}

	return acts;
}

/* Factor out match-init and action-copy to avoid a
 * "Wframe-larger-than=1024" warning. Because the mask is only used to
 * obtain the actions, a separate function saves some stack space.
 *
 * If neither the key nor the actions attribute is present, we return 0
 * directly; in that case the caller will not use the match either. If the
 * actions attribute is present, we try to copy the actions and store them
 * in *acts. Before returning, we reset match->mask, because we must not
 * hand back a match object with a dangling reference to the on-stack mask.
 */
static noinline_for_stack int
ovs_nla_init_match_and_action(struct net *net,
			      struct sw_flow_match *match,
			      struct sw_flow_key *key,
			      struct nlattr **a,
			      struct sw_flow_actions **acts,
			      bool log)
{
	struct sw_flow_mask mask;
	int error = 0;

	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(match, key, true, &mask);
		error = ovs_nla_get_match(net, match, a[OVS_FLOW_ATTR_KEY],
					  a[OVS_FLOW_ATTR_MASK], log);
		if (error)
			goto error;
	}

	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		if (!a[OVS_FLOW_ATTR_KEY]) {
			OVS_NLERR(log,
				  "Flow key attribute not present in set flow.");
			error = -EINVAL;
			goto error;
		}

		*acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], key,
					 &mask, log);
		if (IS_ERR(*acts)) {
			error = PTR_ERR(*acts);
			goto error;
		}
	}

	/* On success, error is 0. */
error:
	match->mask = NULL;
	return error;
}

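/* OVS_FLOW_CMD_SET handler: replaces the actions of an existing flow
 * (looked up by UFID or by exact key) and optionally clears its statistics.
 */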
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	struct sw_flow_id sfid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
	if (!a[OVS_FLOW_ATTR_KEY] && !ufid_present) {
		OVS_NLERR(log,
			  "Flow set message rejected, Key attribute missing.");
		return -EINVAL;
	}

	error = ovs_nla_init_match_and_action(net, &match, &key, a,
					      &acts, log);
	if (error)
		goto error;

	if (acts) {
		/* Can allocate before locking if have acts. */
		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
						ufid_flags);
		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_kfree_acts;
		}
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check that the flow exists. */
	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;
		goto err_unlock_ovs;
	}

	/* Update actions, if present. */
	if (likely(acts)) {
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_SET,
						       ufid_flags);
			BUG_ON(error < 0);
		}
	} else {
		/* Could not alloc without acts before locking. */
		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
						info, OVS_FLOW_CMD_SET, false,
						ufid_flags);

		if (unlikely(IS_ERR(reply))) {
			error = PTR_ERR(reply);
			goto err_unlock_ovs;
		}
	}

	/* Clear stats. */
	if (a[OVS_FLOW_ATTR_CLEAR])
		ovs_flow_stats_clear(flow);
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
	if (old_acts)
		ovs_nla_free_flow_actions_rcu(old_acts);

	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
error:
	return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
	} else if (!ufid_present) {
		OVS_NLERR(log,
			  "Flow get message rejected, Key attribute missing.");
		err = -EINVAL;
	}
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (!flow) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
					OVS_FLOW_CMD_GET, true, ufid_flags);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

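/* OVS_FLOW_CMD_DEL handler: removes one flow identified by UFID or exact
 * key, or flushes the entire flow table when neither is supplied.
 */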
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow = NULL;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, true, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
					NULL, log);
		if (unlikely(err))
			return err;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		err = -ENODEV;
		goto unlock;
	}

	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		err = -ENOENT;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);
	ovs_unlock();

	reply = ovs_flow_cmd_alloc_info(rcu_dereference_raw(flow->sf_acts),
					&flow->id, info, false, ufid_flags);

	if (likely(reply)) {
		if (!IS_ERR(reply)) {
			rcu_read_lock(); /* To keep RCU checker happy. */
			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
						     reply, info->snd_portid,
						     info->snd_seq, 0,
						     OVS_FLOW_CMD_DEL,
						     ufid_flags);
			rcu_read_unlock();
			BUG_ON(err < 0);
			ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
		} else {
			genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
				     GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));
		}
	}

	ovs_flow_free(flow, true);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

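/* Netlink dump callback for flows: iterates the flow table, resuming from
 * the bucket and object position saved in cb->args between invocations.
 */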
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;
	u32 ufid_flags;
	int err;

	err = genlmsg_parse_deprecated(cb->nlh, &dp_flow_genl_family, a,
				       OVS_FLOW_ATTR_MAX, flow_policy, NULL);
	if (err)
		return err;
	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_GET, ufid_flags) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};

static const struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = flow_policy,
#endif
	  .doit = ovs_flow_cmd_new
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = flow_policy,
#endif
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = 0, /* OK for unprivileged users. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = flow_policy,
#endif
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = flow_policy,
#endif
	  .doit = ovs_flow_cmd_set,
	},
};

static struct genl_family dp_flow_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
#ifndef HAVE_GENL_OPS_POLICY
	.policy = flow_policy,
#endif
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_flow_genl_ops,
	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
	.mcgrps = &ovs_dp_flow_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

	return msgsize;
}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
			  &dp_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
			  sizeof(struct ovs_dp_megaflow_stats),
			  &dp_megaflow_stats, OVS_DP_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
		goto nla_put_failure;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(void)
{
	return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					const struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return;

	WARN(dp->user_features, "Dropping previously announced user features\n");
	dp->user_features = 0;
}

DEFINE_STATIC_KEY_FALSE(tc_recirc_sharing_support);

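/* Validate and apply OVS_DP_ATTR_USER_FEATURES, rejecting unknown feature
 * flags and toggling the TC recirc-sharing static key accordingly.
 */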
static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
	u32 user_features = 0;

	if (a[OVS_DP_ATTR_USER_FEATURES]) {
		user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);

		if (user_features & ~(OVS_DP_F_VPORT_PIDS |
				      OVS_DP_F_UNALIGNED |
				      OVS_DP_F_TC_RECIRC_SHARING))
			return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
		if (user_features & OVS_DP_F_TC_RECIRC_SHARING)
			return -EOPNOTSUPP;
#endif
	}

	dp->user_features = user_features;

	if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
		static_branch_enable(&tc_recirc_sharing_support);
	else
		static_branch_disable(&tc_recirc_sharing_support);

	return 0;
}

static int ovs_dp_stats_init(struct datapath *dp)
{
	dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
	if (!dp->stats_percpu)
		return -ENOMEM;

	return 0;
}

static int ovs_dp_vport_init(struct datapath *dp)
{
	int i;

	dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
				  sizeof(struct hlist_head),
				  GFP_KERNEL);
	if (!dp->ports)
		return -ENOMEM;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	return 0;
}

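/* OVS_DP_CMD_NEW handler: allocates and initializes a datapath (flow table,
 * per-CPU stats, vport table, meters), creates its local internal vport and
 * publishes it on the per-netns datapath list.
 */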
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	reply = ovs_dp_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_destroy_reply;

	ovs_dp_set_net(dp, sock_net(skb->sk));

	/* Allocate table. */
	err = ovs_flow_tbl_init(&dp->table);
	if (err)
		goto err_destroy_dp;

	err = ovs_dp_stats_init(dp);
	if (err)
		goto err_destroy_table;

	err = ovs_dp_vport_init(dp);
	if (err)
		goto err_destroy_stats;

	err = ovs_meters_init(dp);
	if (err)
		goto err_destroy_ports;

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];

	err = ovs_dp_change(dp, a);
	if (err)
		goto err_destroy_meters;

	/* So far only local changes have been made, now need the lock. */
	ovs_lock();

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		if (err == -EEXIST) {
			/* An outdated user space instance that does not understand
			 * the concept of user_features has attempted to create a new
			 * datapath and is likely to reuse it. Drop all user features.
			 */
			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
				ovs_dp_reset_user_features(skb, info);
		}

		ovs_unlock();
		goto err_destroy_meters;
	}

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_NEW);
	BUG_ON(err < 0);

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
	return 0;

err_destroy_meters:
	ovs_meters_exit(dp);
err_destroy_ports:
	kfree(dp->ports);
err_destroy_stats:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(&dp->table);
err_destroy_dp:
	kfree(dp);
err_destroy_reply:
	kfree_skb(reply);
err:
	return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath's internal port. All other ports in the
	 * datapath must already have been destroyed before detaching it and
	 * freeing the datapath.
	 */
1816 ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1817
1818 /* RCU destroy the flow table */
1819 call_rcu(&dp->rcu, destroy_dp_rcu);
1820 }
1821
1822 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1823 {
1824 struct sk_buff *reply;
1825 struct datapath *dp;
1826 int err;
1827
1828 reply = ovs_dp_cmd_alloc_info();
1829 if (!reply)
1830 return -ENOMEM;
1831
1832 ovs_lock();
1833 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1834 err = PTR_ERR(dp);
1835 if (IS_ERR(dp))
1836 goto err_unlock_free;
1837
1838 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1839 info->snd_seq, 0, OVS_DP_CMD_DEL);
1840 BUG_ON(err < 0);
1841
1842 __dp_destroy(dp);
1843 ovs_unlock();
1844
1845 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1846 return 0;
1847
1848 err_unlock_free:
1849 ovs_unlock();
1850 kfree_skb(reply);
1851 return err;
1852 }
1853
1854 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1855 {
1856 struct sk_buff *reply;
1857 struct datapath *dp;
1858 int err;
1859
1860 reply = ovs_dp_cmd_alloc_info();
1861 if (!reply)
1862 return -ENOMEM;
1863
1864 ovs_lock();
1865 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1866 err = PTR_ERR(dp);
1867 if (IS_ERR(dp))
1868 goto err_unlock_free;
1869
1870 err = ovs_dp_change(dp, info->attrs);
1871 if (err)
1872 goto err_unlock_free;
1873
1874 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1875 info->snd_seq, 0, OVS_DP_CMD_GET);
1876 BUG_ON(err < 0);
1877
1878 ovs_unlock();
1879
1880 ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1881 return 0;
1882
1883 err_unlock_free:
1884 ovs_unlock();
1885 kfree_skb(reply);
1886 return err;
1887 }
1888
1889 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1890 {
1891 struct sk_buff *reply;
1892 struct datapath *dp;
1893 int err;
1894
1895 reply = ovs_dp_cmd_alloc_info();
1896 if (!reply)
1897 return -ENOMEM;
1898
1899 ovs_lock();
1900 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1901 if (IS_ERR(dp)) {
1902 err = PTR_ERR(dp);
1903 goto err_unlock_free;
1904 }
1905 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1906 info->snd_seq, 0, OVS_DP_CMD_GET);
1907 BUG_ON(err < 0);
1908 ovs_unlock();
1909
1910 return genlmsg_reply(reply, info);
1911
1912 err_unlock_free:
1913 ovs_unlock();
1914 kfree_skb(reply);
1915 return err;
1916 }
1917
1918 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1919 {
1920 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1921 struct datapath *dp;
1922 int skip = cb->args[0];
1923 int i = 0;
1924
1925 ovs_lock();
1926 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1927 if (i >= skip &&
1928 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1929 cb->nlh->nlmsg_seq, NLM_F_MULTI,
1930 OVS_DP_CMD_GET) < 0)
1931 break;
1932 i++;
1933 }
1934 ovs_unlock();
1935
1936 cb->args[0] = i;
1937
1938 return skb->len;
1939 }
1940
1941 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1942 [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1943 [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1944 [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1945 };
1946
1947 static const struct genl_ops dp_datapath_genl_ops[] = {
1948 { .cmd = OVS_DP_CMD_NEW,
1949 #ifdef HAVE_GENL_VALIDATE_FLAGS
1950 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1951 #endif
1952 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1953 #ifdef HAVE_GENL_OPS_POLICY
1954 .policy = datapath_policy,
1955 #endif
1956 .doit = ovs_dp_cmd_new
1957 },
1958 { .cmd = OVS_DP_CMD_DEL,
1959 #ifdef HAVE_GENL_VALIDATE_FLAGS
1960 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1961 #endif
1962 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1963 #ifdef HAVE_GENL_OPS_POLICY
1964 .policy = datapath_policy,
1965 #endif
1966 .doit = ovs_dp_cmd_del
1967 },
1968 { .cmd = OVS_DP_CMD_GET,
1969 #ifdef HAVE_GENL_VALIDATE_FLAGS
1970 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1971 #endif
1972 .flags = 0, /* OK for unprivileged users. */
1973 #ifdef HAVE_GENL_OPS_POLICY
1974 .policy = datapath_policy,
1975 #endif
1976 .doit = ovs_dp_cmd_get,
1977 .dumpit = ovs_dp_cmd_dump
1978 },
1979 { .cmd = OVS_DP_CMD_SET,
1980 #ifdef HAVE_GENL_VALIDATE_FLAGS
1981 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1982 #endif
1983 .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1984 #ifdef HAVE_GENL_OPS_POLICY
1985 .policy = datapath_policy,
1986 #endif
1987 .doit = ovs_dp_cmd_set,
1988 },
1989 };
1990
1991 static struct genl_family dp_datapath_genl_family __ro_after_init = {
1992 .hdrsize = sizeof(struct ovs_header),
1993 .name = OVS_DATAPATH_FAMILY,
1994 .version = OVS_DATAPATH_VERSION,
1995 .maxattr = OVS_DP_ATTR_MAX,
1996 #ifndef HAVE_GENL_OPS_POLICY
1997 .policy = datapath_policy,
1998 #endif
1999 .netnsok = true,
2000 .parallel_ops = true,
2001 .ops = dp_datapath_genl_ops,
2002 .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
2003 .mcgrps = &ovs_dp_datapath_multicast_group,
2004 .n_mcgrps = 1,
2005 .module = THIS_MODULE,
2006 };

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   struct net *net, u32 portid, u32 seq,
				   u32 flags, u8 cmd, gfp_t gfp)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
			   ovs_vport_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex))
		goto nla_put_failure;

#ifdef HAVE_PEERNET2ID_ALLOC
	if (!net_eq(net, dev_net(vport->dev))) {
		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);

		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
			goto nla_put_failure;
	}
#endif

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
			  sizeof(struct ovs_vport_stats), &vport_stats,
			  OVS_VPORT_ATTR_PAD))
		goto nla_put_failure;

	if (ovs_vport_get_upcall_portids(vport, skb))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
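
/*
 * Note the asymmetric error handling above: only -EMSGSIZE from
 * ovs_vport_get_options() cancels the message, because the caller can
 * retry with a larger buffer; any other failure merely omits
 * OVS_VPORT_ATTR_OPTIONS and the message is still delivered.
 */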

static struct sk_buff *ovs_vport_cmd_alloc_info(void)
{
	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}

/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
					 u32 portid, u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
					 GFP_KERNEL);
	BUG_ON(retval < 0);

	return skb;
}
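
/*
 * NLMSG_DEFAULT_SIZE is expected to fit any single vport message, so a
 * fill failure here would indicate a programming error rather than a
 * runtime condition; hence BUG_ON() instead of error unwinding.  The
 * same reasoning applies to the BUG_ON()s in the command handlers
 * below.
 */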

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  const struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_IFINDEX])
		return ERR_PTR(-EOPNOTSUPP);
	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
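
/*
 * lookup_vport() above accepts two addressing modes: globally by name,
 * or by the (dp_ifindex, port number) pair; addressing by ifindex is
 * not supported, hence the early -EOPNOTSUPP.  When both a name and a
 * dp_ifindex are given, the dp_ifindex must match the datapath the
 * named vport actually belongs to.
 */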

static unsigned int ovs_get_max_headroom(struct datapath *dp)
{
	unsigned int dev_headroom, max_headroom = 0;
	struct net_device *dev;
	struct vport *vport;
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			dev = vport->dev;
			dev_headroom = netdev_get_fwd_headroom(dev);
			if (dev_headroom > max_headroom)
				max_headroom = dev_headroom;
		}
	}

	return max_headroom;
}

/* Called with ovs_mutex. */
static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
{
	struct vport *vport;
	int i;

	dp->max_headroom = new_headroom;
	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
			netdev_set_rx_headroom(vport->dev, new_headroom);
}
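
/*
 * dp->max_headroom tracks the largest forwarding headroom needed by any
 * port in the datapath.  Propagating it via netdev_set_rx_headroom()
 * lets drivers allocate receive skbs with enough head space that
 * pushing tunnel or other encapsulation headers on output normally
 * needs no reallocation.
 */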

static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	unsigned int new_headroom;
	u32 port_no;
	int err;

	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		return -EINVAL;
	if (a[OVS_VPORT_ATTR_IFINDEX])
		return -EOPNOTSUPP;

	port_no = a[OVS_VPORT_ATTR_PORT_NO]
		  ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
	if (port_no >= DP_MAX_PORTS)
		return -EFBIG;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
restart:
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_free;

	if (port_no) {
		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_free;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_free;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport)) {
		if (err == -EAGAIN)
			goto restart;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_NEW, GFP_KERNEL);
	BUG_ON(err < 0);

	new_headroom = netdev_get_fwd_headroom(vport->dev);

	if (new_headroom > dp->max_headroom)
		ovs_update_headroom(dp, new_headroom);
	else
		netdev_set_rx_headroom(vport->dev, dp->max_headroom);

	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group,
		   reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}
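
/*
 * new_vport() returns -EAGAIN when the requested vport type had no
 * registered ops and a module load was triggered for it; jumping back
 * to "restart" re-validates the datapath and retries once the module
 * has had a chance to register the type.
 */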

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];

		err = ovs_vport_set_upcall_portids(vport, ids);
		if (err)
			goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_SET, GFP_KERNEL);
	BUG_ON(err < 0);
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group,
		   reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	bool update_headroom = false;
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	unsigned int new_headroom;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_DEL, GFP_KERNEL);
	BUG_ON(err < 0);

	/* Deleting this vport may change the datapath's max headroom. */
	dp = vport->dp;
	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
		update_headroom = true;

	netdev_reset_rx_headroom(vport->dev);
	ovs_dp_detach_port(vport);

	if (update_headroom) {
		new_headroom = ovs_get_max_headroom(dp);

		if (new_headroom < dp->max_headroom)
			ovs_update_headroom(dp, new_headroom);
	}
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group,
		   reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;
	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
				      info->snd_portid, info->snd_seq, 0,
				      OVS_VPORT_CMD_GET, GFP_ATOMIC);
	BUG_ON(err < 0);
	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock_free:
	rcu_read_unlock();
	kfree_skb(reply);
	return err;
}
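
/*
 * Unlike the other vport handlers, the GET path above runs under
 * rcu_read_lock() rather than ovs_lock(), so ovs_vport_cmd_fill_info()
 * is told to use GFP_ATOMIC for any allocation it makes (such as
 * peernet2id_alloc()); the mutex-protected paths can safely pass
 * GFP_KERNEL instead.
 */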

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    sock_net(skb->sk),
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_GET,
						    GFP_ATOMIC) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}
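
/*
 * The vport dump keeps a two-level cursor in cb->args[]: args[0] names
 * the hash bucket to resume from and args[1] counts the entries
 * already emitted within that bucket.  If the table changes between
 * dump calls an entry may be skipped or duplicated; that best-effort
 * behaviour is the usual semantic of netlink dumps.
 */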

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
};

static const struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = vport_policy,
#endif
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = vport_policy,
#endif
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = 0,			/* OK for unprivileged users. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = vport_policy,
#endif
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
#ifdef HAVE_GENL_VALIDATE_FLAGS
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
#endif
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
#ifdef HAVE_GENL_OPS_POLICY
	  .policy = vport_policy,
#endif
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family dp_vport_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
#ifndef HAVE_GENL_OPS_POLICY
	.policy = vport_policy,
#endif
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_vport_genl_ops,
	.n_ops = ARRAY_SIZE(dp_vport_genl_ops),
	.mcgrps = &ovs_dp_vport_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};

static struct genl_family *dp_genl_families[] = {
	&dp_datapath_genl_family,
	&dp_vport_genl_family,
	&dp_flow_genl_family,
	&dp_packet_genl_family,
	&dp_meter_genl_family,
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	&dp_ct_limit_genl_family,
#endif
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i]);
}

static int __init dp_register_genl(void)
{
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		err = genl_register_family(dp_genl_families[i]);
		if (err)
			goto error;
	}

	return 0;

error:
	dp_unregister_genl(i);
	return err;
}
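
/*
 * On failure, genl_register_family() has succeeded only for families
 * dp_genl_families[0..i-1], so dp_unregister_genl(i) unwinds exactly
 * those before the error is propagated.
 */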

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	ovs_netns_frags_init(net);
	ovs_netns_frags6_init(net);
	return ovs_ct_init(net);
}

static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
					    struct list_head *head)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		int i;

		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
			struct vport *vport;

			hlist_for_each_entry(vport, &dp->ports[i],
					     dp_hash_node) {
				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
					continue;

				if (dev_net(vport->dev) == dnet)
					list_add(&vport->detach_list, head);
			}
		}
	}
}

static void __net_exit ovs_exit_net(struct net *dnet)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
	struct vport *vport, *vport_next;
	struct net *net;
	LIST_HEAD(head);

	ovs_netns_frags6_exit(dnet);
	ovs_netns_frags_exit(dnet);
	ovs_ct_exit(dnet);
	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

#ifdef HAVE_NET_RWSEM
	down_read(&net_rwsem);
#else
	rtnl_lock();
#endif
	for_each_net(net)
		list_vports_from_net(net, dnet, &head);
#ifdef HAVE_NET_RWSEM
	up_read(&net_rwsem);
#else
	rtnl_unlock();
#endif

	/* Detach all vports from the given namespace. */
	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
		list_del(&vport->detach_list);
		ovs_dp_detach_port(vport);
	}

	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};
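
/*
 * register_pernet_device() (called from dp_init() below) invokes
 * ovs_init_net() for every existing and subsequently created network
 * namespace, and ovs_exit_net() as each namespace is torn down;
 * ovs_net_id keys the per-namespace storage sized by '.size'.
 */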

static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath %s\n", VERSION);

	ovs_nsh_init();
	err = action_fifos_init();
	if (err)
		goto error;

	err = ovs_internal_dev_rtnl_link_register();
	if (err)
		goto error_action_fifos_exit;

	err = ovs_flow_init();
	if (err)
		goto error_unreg_rtnl_link;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = compat_init();
	if (err)
		goto error_netns_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_compat_exit;

	err = ovs_netdev_init();
	if (err)
		goto error_unreg_notifier;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_netdev;

	return 0;

error_unreg_netdev:
	ovs_netdev_exit();
error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_compat_exit:
	compat_exit();
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_unreg_rtnl_link:
	ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
	action_fifos_exit();
error:
	ovs_nsh_cleanup();
	return err;
}
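
/*
 * The error ladder above unwinds initialization strictly in reverse:
 * each label releases only what was set up before the failing step, so
 * a failure at any point leaves no dangling registrations behind.
 * dp_cleanup() below performs the same teardown for a healthy module
 * on unload.
 */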

static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	ovs_netdev_exit();
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	compat_exit();
	unregister_pernet_device(&ovs_net_ops);
	/* Wait for outstanding call_rcu() callbacks (e.g. deferred flow
	 * frees) to finish before the caches backing them go away. */
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_internal_dev_rtnl_link_unregister();
	action_fifos_exit();
	ovs_nsh_cleanup();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_METER_FAMILY);
MODULE_ALIAS_GENL_FAMILY(OVS_CT_LIMIT_FAMILY);