drivers/net/bareudp.c
1// SPDX-License-Identifier: GPL-2.0
2/* Bareudp: UDP tunnel encapsulation for different payload types like
3 * MPLS, NSH, IP, etc.
4 * Copyright (c) 2019 Nokia, Inc.
5 * Authors: Martin Varghese, <martin.varghese@nokia.com>
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/etherdevice.h>
13#include <linux/hash.h>
14#include <net/dst_metadata.h>
15#include <net/gro_cells.h>
16#include <net/rtnetlink.h>
17#include <net/protocol.h>
18#include <net/ip6_tunnel.h>
19#include <net/ip_tunnels.h>
20#include <net/udp_tunnel.h>
21#include <net/bareudp.h>
22
23#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
24#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
25 sizeof(struct udphdr))
26#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
27 sizeof(struct udphdr))
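/*
 * Fixed encapsulation overhead. BAREUDP_BASE_HLEN is what is stripped on
 * receive (only the outer UDP header; the outer IP header has already been
 * handled by the IP stack), while the IPV4/IPV6 variants give the full outer
 * overhead used for the PMTU checks on transmit. For example,
 * BAREUDP_IPV4_HLEN is 20 + 8 = 28 bytes added in front of the payload.
 */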
28
29static bool log_ecn_error = true;
30module_param(log_ecn_error, bool, 0644);
31MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
32
33/* per-network namespace private data for this module */
34
35static unsigned int bareudp_net_id;
36
37struct bareudp_net {
38 struct list_head bareudp_list;
39};
40
41/* Pseudo network device */
42struct bareudp_dev {
43 struct net *net; /* netns for packet i/o */
44 struct net_device *dev; /* netdev for bareudp tunnel */
45 __be16 ethertype;
46 __be16 port;
47 u16 sport_min;
48 bool multi_proto_mode;
49 struct socket __rcu *sock;
50 struct list_head next; /* bareudp node on namespace list */
51 struct gro_cells gro_cells;
52};
53
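/*
 * Receive path, called by the UDP stack for every packet arriving on the
 * tunnel socket. The payload carries no protocol field of its own, so the
 * inner protocol is normally taken from the configured ethertype. In
 * multi-proto mode two narrow exceptions are inferred from the packet
 * itself: an ETH_P_IP device also accepts IPv6 (distinguished by the
 * version nibble), and an ETH_P_MPLS_UC device also accepts multicast MPLS
 * (distinguished by a multicast destination in the outer header).
 */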
54static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
55{
56 struct metadata_dst *tun_dst = NULL;
57 struct bareudp_dev *bareudp;
58 unsigned short family;
59 unsigned int len;
60 __be16 proto;
61 void *oiph;
62 int err;
63
64 bareudp = rcu_dereference_sk_user_data(sk);
65 if (!bareudp)
66 goto drop;
67
68 if (skb->protocol == htons(ETH_P_IP))
69 family = AF_INET;
70 else
71 family = AF_INET6;
72
73 if (bareudp->ethertype == htons(ETH_P_IP)) {
74 struct iphdr *iphdr;
75
76 iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
77 if (iphdr->version == 4) {
78 proto = bareudp->ethertype;
79 } else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
80 proto = htons(ETH_P_IPV6);
81 } else {
82 bareudp->dev->stats.rx_dropped++;
83 goto drop;
84 }
85 } else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
86 struct iphdr *tunnel_hdr;
87
88 tunnel_hdr = (struct iphdr *)skb_network_header(skb);
89 if (tunnel_hdr->version == 4) {
90 if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
91 proto = bareudp->ethertype;
92 } else if (bareudp->multi_proto_mode &&
93 ipv4_is_multicast(tunnel_hdr->daddr)) {
94 proto = htons(ETH_P_MPLS_MC);
95 } else {
96 bareudp->dev->stats.rx_dropped++;
97 goto drop;
98 }
99 } else {
100 int addr_type;
101 struct ipv6hdr *tunnel_hdr_v6;
102
103 tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
104 addr_type =
105 ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
106 if (!(addr_type & IPV6_ADDR_MULTICAST)) {
107 proto = bareudp->ethertype;
108 } else if (bareudp->multi_proto_mode &&
109 (addr_type & IPV6_ADDR_MULTICAST)) {
110 proto = htons(ETH_P_MPLS_MC);
111 } else {
112 bareudp->dev->stats.rx_dropped++;
113 goto drop;
114 }
115 }
116 } else {
117 proto = bareudp->ethertype;
118 }
119
120 if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
121 proto,
122 !net_eq(bareudp->net,
123 dev_net(bareudp->dev)))) {
124 bareudp->dev->stats.rx_dropped++;
125 goto drop;
126 }
127 tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
128 if (!tun_dst) {
129 bareudp->dev->stats.rx_dropped++;
130 goto drop;
131 }
132 skb_dst_set(skb, &tun_dst->dst);
133 skb->dev = bareudp->dev;
134 oiph = skb_network_header(skb);
135 skb_reset_network_header(skb);
136
137 if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
138 err = IP_ECN_decapsulate(oiph, skb);
139 else
140 err = IP6_ECN_decapsulate(oiph, skb);
141
142 if (unlikely(err)) {
143 if (log_ecn_error) {
144 if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
145 net_info_ratelimited("non-ECT from %pI4 "
146 "with TOS=%#x\n",
147 &((struct iphdr *)oiph)->saddr,
148 ((struct iphdr *)oiph)->tos);
149 else
150 net_info_ratelimited("non-ECT from %pI6\n",
151 &((struct ipv6hdr *)oiph)->saddr);
152 }
153 if (err > 1) {
154 ++bareudp->dev->stats.rx_frame_errors;
155 ++bareudp->dev->stats.rx_errors;
156 goto drop;
157 }
158 }
159
160 len = skb->len;
161 err = gro_cells_receive(&bareudp->gro_cells, skb);
162 if (likely(err == NET_RX_SUCCESS))
163 dev_sw_netstats_rx_add(bareudp->dev, len);
164
165 return 0;
166drop:
167 /* Consume bad packet */
168 kfree_skb(skb);
169
170 return 0;
171}
172
173static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
174{
175 return 0;
176}
177
178static int bareudp_init(struct net_device *dev)
179{
180 struct bareudp_dev *bareudp = netdev_priv(dev);
181 int err;
182
183 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
184 if (!dev->tstats)
185 return -ENOMEM;
186
187 err = gro_cells_init(&bareudp->gro_cells, dev);
188 if (err) {
189 free_percpu(dev->tstats);
190 return err;
191 }
192 return 0;
193}
194
195static void bareudp_uninit(struct net_device *dev)
196{
197 struct bareudp_dev *bareudp = netdev_priv(dev);
198
199 gro_cells_destroy(&bareudp->gro_cells);
200 free_percpu(dev->tstats);
201}
202
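/*
 * The listening socket is not created until the device is brought up
 * (ndo_open -> bareudp_socket_create). When CONFIG_IPV6 is enabled the
 * socket is opened as AF_INET6 with ipv6_v6only left unset, so a single
 * socket can serve both address families.
 */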
203static struct socket *bareudp_create_sock(struct net *net, __be16 port)
204{
205 struct udp_port_cfg udp_conf;
206 struct socket *sock;
207 int err;
208
209 memset(&udp_conf, 0, sizeof(udp_conf));
210#if IS_ENABLED(CONFIG_IPV6)
211 udp_conf.family = AF_INET6;
212#else
213 udp_conf.family = AF_INET;
214#endif
215 udp_conf.local_udp_port = port;
216 /* Open UDP socket */
217 err = udp_sock_create(net, &udp_conf, &sock);
218 if (err < 0)
219 return ERR_PTR(err);
220
221 return sock;
222}
223
224/* Create new listen socket if needed */
225static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
226{
227 struct udp_tunnel_sock_cfg tunnel_cfg;
228 struct socket *sock;
229
230 sock = bareudp_create_sock(bareudp->net, port);
231 if (IS_ERR(sock))
232 return PTR_ERR(sock);
233
234 /* Mark socket as an encapsulation socket */
235 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
236 tunnel_cfg.sk_user_data = bareudp;
237 tunnel_cfg.encap_type = 1;
238 tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
239 tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
240 tunnel_cfg.encap_destroy = NULL;
241 setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);
242
243 /* As the setup_udp_tunnel_sock does not call udp_encap_enable if the
244 * socket type is v6, an explicit call to udp_encap_enable is needed.
245 */
246 if (sock->sk->sk_family == AF_INET6)
247 udp_encap_enable();
248
249 rcu_assign_pointer(bareudp->sock, sock);
250 return 0;
251}
252
253static int bareudp_open(struct net_device *dev)
254{
255 struct bareudp_dev *bareudp = netdev_priv(dev);
256 int ret = 0;
257
258 ret = bareudp_socket_create(bareudp, bareudp->port);
259 return ret;
260}
261
262static void bareudp_sock_release(struct bareudp_dev *bareudp)
263{
264 struct socket *sock;
265
266 sock = bareudp->sock;
267 rcu_assign_pointer(bareudp->sock, NULL);
268 synchronize_net();
269 udp_tunnel_sock_release(sock);
270}
271
272static int bareudp_stop(struct net_device *dev)
273{
274 struct bareudp_dev *bareudp = netdev_priv(dev);
275
276 bareudp_sock_release(bareudp);
277 return 0;
278}
279
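/*
 * IPv4 transmit path: route the packet with the metadata tunnel key, check
 * PMTU against the extra IPv4 + UDP overhead, pick a source port hashed
 * from the flow (bounded below by sport_min), and hand the encapsulated skb
 * to udp_tunnel_xmit_skb(). TOS, TTL and the DF bit all come from the
 * tunnel key supplied by the caller.
 */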
280static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
281 struct bareudp_dev *bareudp,
282 const struct ip_tunnel_info *info)
283{
284 bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
285 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
286 struct socket *sock = rcu_dereference(bareudp->sock);
287 bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
288 const struct ip_tunnel_key *key = &info->key;
289 struct rtable *rt;
290 __be16 sport, df;
291 int min_headroom;
292 __u8 tos, ttl;
293 __be32 saddr;
294 int err;
295
296 if (!sock)
297 return -ESHUTDOWN;
298
299 rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
300 IPPROTO_UDP, use_cache);
301
302 if (IS_ERR(rt))
303 return PTR_ERR(rt);
304
305 skb_tunnel_check_pmtu(skb, &rt->dst,
306 BAREUDP_IPV4_HLEN + info->options_len, false);
307
308 sport = udp_flow_src_port(bareudp->net, skb,
309 bareudp->sport_min, USHRT_MAX,
310 true);
311 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
312 ttl = key->ttl;
313 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
314 skb_scrub_packet(skb, xnet);
315
316 err = -ENOSPC;
317 if (!skb_pull(skb, skb_network_offset(skb)))
318 goto free_dst;
319
320 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
321 BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
322
323 err = skb_cow_head(skb, min_headroom);
324 if (unlikely(err))
325 goto free_dst;
326
327 err = udp_tunnel_handle_offloads(skb, udp_sum);
328 if (err)
329 goto free_dst;
330
331 skb_set_inner_protocol(skb, bareudp->ethertype);
332 udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
333 tos, ttl, df, sport, bareudp->port,
334 !net_eq(bareudp->net, dev_net(bareudp->dev)),
335 !(info->key.tun_flags & TUNNEL_CSUM));
336 return 0;
337
338free_dst:
339 dst_release(&rt->dst);
340 return err;
341}
342
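/*
 * IPv6 counterpart of bareudp_xmit_skb(): the same steps, but using
 * ip6_dst_lookup_tunnel() and udp_tunnel6_xmit_skb(), with the flow label
 * taken from the tunnel key.
 */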
343static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
344 struct bareudp_dev *bareudp,
345 const struct ip_tunnel_info *info)
346{
347 bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
348 bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
349 struct socket *sock = rcu_dereference(bareudp->sock);
350 bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
351 const struct ip_tunnel_key *key = &info->key;
352 struct dst_entry *dst = NULL;
353 struct in6_addr saddr, daddr;
354 int min_headroom;
355 __u8 prio, ttl;
356 __be16 sport;
357 int err;
358
359 if (!sock)
360 return -ESHUTDOWN;
361
362 dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
363 IPPROTO_UDP, use_cache);
364 if (IS_ERR(dst))
365 return PTR_ERR(dst);
366
367 skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
368 false);
369
370 sport = udp_flow_src_port(bareudp->net, skb,
371 bareudp->sport_min, USHRT_MAX,
372 true);
373 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
374 ttl = key->ttl;
375
376 skb_scrub_packet(skb, xnet);
377
378 err = -ENOSPC;
379 if (!skb_pull(skb, skb_network_offset(skb)))
380 goto free_dst;
381
382 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
383 BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
384
385 err = skb_cow_head(skb, min_headroom);
386 if (unlikely(err))
387 goto free_dst;
388
389 err = udp_tunnel_handle_offloads(skb, udp_sum);
390 if (err)
391 goto free_dst;
392
393 daddr = info->key.u.ipv6.dst;
394 udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
395 &saddr, &daddr, prio, ttl,
396 info->key.label, sport, bareudp->port,
397 !(info->key.tun_flags & TUNNEL_CSUM));
398 return 0;
399
400free_dst:
401 dst_release(dst);
402 return err;
403}
404
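/*
 * A packet may be transmitted only if its protocol matches the configured
 * ethertype or, in multi-proto mode, one of the two allowed extensions:
 * ETH_P_MPLS_MC on an ETH_P_MPLS_UC device and ETH_P_IPV6 on an ETH_P_IP
 * device.
 */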
405static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
406{
407 if (bareudp->ethertype == proto)
408 return true;
409
410 if (!bareudp->multi_proto_mode)
411 return false;
412
413 if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
414 proto == htons(ETH_P_MPLS_MC))
415 return true;
416
417 if (bareudp->ethertype == htons(ETH_P_IP) &&
418 proto == htons(ETH_P_IPV6))
419 return true;
420
421 return false;
422}
423
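/*
 * ndo_start_xmit: bareudp is a metadata-based device, so every skb must
 * carry tunnel metadata with IP_TUNNEL_INFO_TX set. Packets without it, or
 * with a protocol the device does not accept, are dropped and counted as
 * tx_errors.
 */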
424static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
425{
426 struct bareudp_dev *bareudp = netdev_priv(dev);
427 struct ip_tunnel_info *info = NULL;
428 int err;
429
430 if (!bareudp_proto_valid(bareudp, skb->protocol)) {
431 err = -EINVAL;
432 goto tx_error;
433 }
434
435 info = skb_tunnel_info(skb);
436 if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
437 err = -EINVAL;
438 goto tx_error;
439 }
440
441 rcu_read_lock();
442 if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
443 err = bareudp6_xmit_skb(skb, dev, bareudp, info);
444 else
445 err = bareudp_xmit_skb(skb, dev, bareudp, info);
446
447 rcu_read_unlock();
448
449 if (likely(!err))
450 return NETDEV_TX_OK;
451tx_error:
452 dev_kfree_skb(skb);
453
454 if (err == -ELOOP)
455 dev->stats.collisions++;
456 else if (err == -ENETUNREACH)
457 dev->stats.tx_carrier_errors++;
458
459 dev->stats.tx_errors++;
460 return NETDEV_TX_OK;
461}
462
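/*
 * ndo_fill_metadata_dst: lets users of the metadata destination (e.g. tc or
 * lightweight tunnels) learn the source address and UDP ports this device
 * would use for a given skb, by performing the same route lookup as the
 * transmit path without sending anything.
 */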
463static int bareudp_fill_metadata_dst(struct net_device *dev,
464 struct sk_buff *skb)
465{
466 struct ip_tunnel_info *info = skb_tunnel_info(skb);
467 struct bareudp_dev *bareudp = netdev_priv(dev);
468 bool use_cache;
469
470 use_cache = ip_tunnel_dst_cache_usable(skb, info);
471
472 if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
473 struct rtable *rt;
474 __be32 saddr;
475
476 rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
477 info, IPPROTO_UDP, use_cache);
478 if (IS_ERR(rt))
479 return PTR_ERR(rt);
480
481 ip_rt_put(rt);
482 info->key.u.ipv4.src = saddr;
483 } else if (ip_tunnel_info_af(info) == AF_INET6) {
484 struct dst_entry *dst;
485 struct in6_addr saddr;
486 struct socket *sock = rcu_dereference(bareudp->sock);
487
488 dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
489 &saddr, info, IPPROTO_UDP,
490 use_cache);
491 if (IS_ERR(dst))
492 return PTR_ERR(dst);
493
494 dst_release(dst);
495 info->key.u.ipv6.src = saddr;
496 } else {
497 return -EINVAL;
498 }
499
500 info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
501 bareudp->sport_min,
502 USHRT_MAX, true);
503 info->key.tp_dst = bareudp->port;
504 return 0;
505}
506
507static const struct net_device_ops bareudp_netdev_ops = {
508 .ndo_init = bareudp_init,
509 .ndo_uninit = bareudp_uninit,
510 .ndo_open = bareudp_open,
511 .ndo_stop = bareudp_stop,
512 .ndo_start_xmit = bareudp_xmit,
513 .ndo_get_stats64 = ip_tunnel_get_stats64,
514 .ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
515};
516
517static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
518 [IFLA_BAREUDP_PORT] = { .type = NLA_U16 },
519 [IFLA_BAREUDP_ETHERTYPE] = { .type = NLA_U16 },
520 [IFLA_BAREUDP_SRCPORT_MIN] = { .type = NLA_U16 },
521 [IFLA_BAREUDP_MULTIPROTO_MODE] = { .type = NLA_FLAG },
522};
523
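/*
 * Example configuration with an iproute2 build that understands these
 * attributes (names as documented in Documentation/networking/bareudp.rst);
 * shown here only as an illustration:
 *
 *   # ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
 *   # ip link add dev bareudp1 type bareudp dstport 6635 ethertype mpls_uc \
 *         multiproto
 */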
524/* Info for udev, that this is a virtual tunnel endpoint */
525static struct device_type bareudp_type = {
526 .name = "bareudp",
527};
528
529/* Initialize the device structure. */
530static void bareudp_setup(struct net_device *dev)
531{
532 dev->netdev_ops = &bareudp_netdev_ops;
533 dev->needs_free_netdev = true;
534 SET_NETDEV_DEVTYPE(dev, &bareudp_type);
535 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
536 dev->features |= NETIF_F_RXCSUM;
537 dev->features |= NETIF_F_GSO_SOFTWARE;
538 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
539 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
540 dev->hard_header_len = 0;
541 dev->addr_len = 0;
542 dev->mtu = ETH_DATA_LEN;
543 dev->min_mtu = IPV4_MIN_MTU;
544 dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
545 dev->type = ARPHRD_NONE;
546 netif_keep_dst(dev);
547 dev->priv_flags |= IFF_NO_QUEUE;
548 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
549}
550
551static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
552 struct netlink_ext_ack *extack)
553{
554 if (!data) {
555 NL_SET_ERR_MSG(extack,
556 "Not enough attributes provided to perform the operation");
557 return -EINVAL;
558 }
559 return 0;
560}
561
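/*
 * Translate the IFLA_BAREUDP_* netlink attributes into a bareudp_conf.
 * The destination port and the ethertype are mandatory; the source port
 * floor and the multi-proto flag are optional.
 */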
562static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
563 struct netlink_ext_ack *extack)
564{
565 memset(conf, 0, sizeof(*conf));
566
567 if (!data[IFLA_BAREUDP_PORT]) {
568 NL_SET_ERR_MSG(extack, "port not specified");
569 return -EINVAL;
570 }
571 if (!data[IFLA_BAREUDP_ETHERTYPE]) {
572 NL_SET_ERR_MSG(extack, "ethertype not specified");
573 return -EINVAL;
574 }
575
576 if (data[IFLA_BAREUDP_PORT])
577 conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
578
579 if (data[IFLA_BAREUDP_ETHERTYPE])
580 conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
581
582 if (data[IFLA_BAREUDP_SRCPORT_MIN])
583 conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
584
585 if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
586 conf->multi_proto_mode = true;
587
588 return 0;
589}
590
591static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
592 const struct bareudp_conf *conf)
593{
594 struct bareudp_dev *bareudp, *t = NULL;
595
596 list_for_each_entry(bareudp, &bn->bareudp_list, next) {
597 if (conf->port == bareudp->port)
598 t = bareudp;
599 }
600 return t;
601}
602
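/*
 * Register a new bareudp device in the given namespace. Only one device per
 * UDP port is allowed per namespace, and multi-proto mode is accepted only
 * for the MPLS unicast and IPv4 ethertypes.
 */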
603static int bareudp_configure(struct net *net, struct net_device *dev,
604 struct bareudp_conf *conf)
605{
606 struct bareudp_net *bn = net_generic(net, bareudp_net_id);
607 struct bareudp_dev *t, *bareudp = netdev_priv(dev);
608 int err;
609
610 bareudp->net = net;
611 bareudp->dev = dev;
612 t = bareudp_find_dev(bn, conf);
613 if (t)
614 return -EBUSY;
615
616 if (conf->multi_proto_mode &&
617 (conf->ethertype != htons(ETH_P_MPLS_UC) &&
618 conf->ethertype != htons(ETH_P_IP)))
619 return -EINVAL;
620
621 bareudp->port = conf->port;
622 bareudp->ethertype = conf->ethertype;
623 bareudp->sport_min = conf->sport_min;
624 bareudp->multi_proto_mode = conf->multi_proto_mode;
625
626 err = register_netdevice(dev);
627 if (err)
628 return err;
629
630 list_add(&bareudp->next, &bn->bareudp_list);
631 return 0;
632}
633
634static int bareudp_link_config(struct net_device *dev,
635 struct nlattr *tb[])
636{
637 int err;
638
639 if (tb[IFLA_MTU]) {
640 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
641 if (err)
642 return err;
643 }
644 return 0;
645}
646
647static int bareudp_newlink(struct net *net, struct net_device *dev,
648 struct nlattr *tb[], struct nlattr *data[],
649 struct netlink_ext_ack *extack)
650{
651 struct bareudp_conf conf;
652 int err;
653
654 err = bareudp2info(data, &conf, extack);
655 if (err)
656 return err;
657
658 err = bareudp_configure(net, dev, &conf);
659 if (err)
660 return err;
661
662 err = bareudp_link_config(dev, tb);
663 if (err)
664 return err;
665
666 return 0;
667}
668
669static void bareudp_dellink(struct net_device *dev, struct list_head *head)
670{
671 struct bareudp_dev *bareudp = netdev_priv(dev);
672
673 list_del(&bareudp->next);
674 unregister_netdevice_queue(dev, head);
675}
676
677static size_t bareudp_get_size(const struct net_device *dev)
678{
679 return nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_PORT */
680 nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */
681 nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */
682 nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */
683 0;
684}
685
686static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
687{
688 struct bareudp_dev *bareudp = netdev_priv(dev);
689
690 if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
691 goto nla_put_failure;
692 if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
693 goto nla_put_failure;
694 if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
695 goto nla_put_failure;
696 if (bareudp->multi_proto_mode &&
697 nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
698 goto nla_put_failure;
699
700 return 0;
701
702nla_put_failure:
703 return -EMSGSIZE;
704}
705
706static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
707 .kind = "bareudp",
708 .maxtype = IFLA_BAREUDP_MAX,
709 .policy = bareudp_policy,
710 .priv_size = sizeof(struct bareudp_dev),
711 .setup = bareudp_setup,
712 .validate = bareudp_validate,
713 .newlink = bareudp_newlink,
714 .dellink = bareudp_dellink,
715 .get_size = bareudp_get_size,
716 .fill_info = bareudp_fill_info,
717};
718
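/*
 * In-kernel API for other modules that want to create a bareudp device
 * without going through rtnetlink. A minimal, illustrative caller sketch
 * (the surrounding code and error handling are hypothetical):
 *
 *	struct bareudp_conf conf = {
 *		.ethertype = htons(ETH_P_MPLS_UC),
 *		.port      = htons(6635),
 *	};
 *	struct net_device *dev;
 *
 *	dev = bareudp_dev_create(net, "mplsudp0", NET_NAME_USER, &conf);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */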
719struct net_device *bareudp_dev_create(struct net *net, const char *name,
720 u8 name_assign_type,
721 struct bareudp_conf *conf)
722{
723 struct nlattr *tb[IFLA_MAX + 1];
724 struct net_device *dev;
725 LIST_HEAD(list_kill);
726 int err;
727
728 memset(tb, 0, sizeof(tb));
729 dev = rtnl_create_link(net, name, name_assign_type,
730 &bareudp_link_ops, tb, NULL);
731 if (IS_ERR(dev))
732 return dev;
733
734 err = bareudp_configure(net, dev, conf);
735 if (err) {
736 free_netdev(dev);
737 return ERR_PTR(err);
738 }
739 err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
740 if (err)
741 goto err;
742
743 err = rtnl_configure_link(dev, NULL);
744 if (err < 0)
745 goto err;
746
747 return dev;
748err:
749 bareudp_dellink(dev, &list_kill);
750 unregister_netdevice_many(&list_kill);
751 return ERR_PTR(err);
752}
753EXPORT_SYMBOL_GPL(bareudp_dev_create);
754
755static __net_init int bareudp_init_net(struct net *net)
756{
757 struct bareudp_net *bn = net_generic(net, bareudp_net_id);
758
759 INIT_LIST_HEAD(&bn->bareudp_list);
760 return 0;
761}
762
763static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
764{
765 struct bareudp_net *bn = net_generic(net, bareudp_net_id);
766 struct bareudp_dev *bareudp, *next;
767
768 list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
769 unregister_netdevice_queue(bareudp->dev, head);
770}
771
772static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
773{
774 struct net *net;
775 LIST_HEAD(list);
776
777 rtnl_lock();
778 list_for_each_entry(net, net_list, exit_list)
779 bareudp_destroy_tunnels(net, &list);
780
781 /* unregister the devices gathered above */
782 unregister_netdevice_many(&list);
783 rtnl_unlock();
784}
785
786static struct pernet_operations bareudp_net_ops = {
787 .init = bareudp_init_net,
788 .exit_batch = bareudp_exit_batch_net,
789 .id = &bareudp_net_id,
790 .size = sizeof(struct bareudp_net),
791};
792
793static int __init bareudp_init_module(void)
794{
795 int rc;
796
797 rc = register_pernet_subsys(&bareudp_net_ops);
798 if (rc)
799 goto out1;
800
801 rc = rtnl_link_register(&bareudp_link_ops);
802 if (rc)
803 goto out2;
804
805 return 0;
806out2:
807 unregister_pernet_subsys(&bareudp_net_ops);
808out1:
809 return rc;
810}
811late_initcall(bareudp_init_module);
812
813static void __exit bareudp_cleanup_module(void)
814{
815 rtnl_link_unregister(&bareudp_link_ops);
816 unregister_pernet_subsys(&bareudp_net_ops);
817}
818module_exit(bareudp_cleanup_module);
819
820 MODULE_ALIAS_RTNL_LINK("bareudp");
821MODULE_LICENSE("GPL");
822MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
823MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");