/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (sort of local ttl),
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially
   taking into account fragmentation. To be short, ttl is not a solution
   at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us, we made
   all that we could make. Even if it is your gated who injected
   the fatal route to the network, even if it were you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. It makes it impossible to maintain even soft
	   state for keyed GRE tunnels with checksum enabled. Tell
	   them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee,
	   so what the hell makes these idiots break standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. It makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with checksum enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee,
	 * so what the hell makes these idiots break standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);
	/* Check base hdr len */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field,
	 * so use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect-metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

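/* Common transmit path for configured (non-collect-md) tunnels: bump the
 * output sequence number if TUNNEL_SEQ is set, push the GRE header, then
 * hand the packet to the generic IP tunnel transmit path.
 */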
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}

static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NULL;
}

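/* Flow-based (collect_md) transmit: the tunnel endpoints, key and flags
 * come from the per-packet metadata dst (e.g. set up by Open vSwitch or
 * an eBPF program) instead of the device configuration.
 */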
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df, flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;
	struct flowi4 fl;
	int tunnel_hlen;
	int version;
	__be16 df;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
	} else {
		goto err_free_rt;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

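/* ndo_fill_metadata_dst: resolve the route toward the packet's tunnel
 * destination and record the source address that would be used, so the
 * caller (e.g. OVS) can see the complete flow key before transmission.
 */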
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 1)
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

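/* Recompute the encapsulation header length, needed headroom, MTU and
 * GSO/LLTX feature flags after the tunnel's o_flags have been changed
 * (via ioctl or netlink changelink).
 */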
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
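/* Multicast mode: the device joins the multicast group on the underlying
 * interface at open time and leaves it on close, so the encapsulated
 * "broadcast LAN" traffic described above can be received.
 */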
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor can we
		 * support 2 levels of outer headers requiring an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN Session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);
	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

bool is_gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_gretap_dev);
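
/* Usage note (iproute2; illustrative, not part of this file): a gretap
 * device in collect-metadata mode is typically created with
 *	ip link add dev gretap1 type gretap external
 * after which the per-packet tunnel key, endpoints and flags are supplied
 * by the datapath (e.g. Open vSwitch or an eBPF program) rather than by
 * the device configuration.
 */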

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
		goto nla_put_failure;

	if (t->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;
	} else if (t->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
}
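
/* Usage note (iproute2; illustrative, not part of this file): a classic
 * ERSPAN v1 device is typically created with something like
 *	ip link add dev erspan1 type erspan local 10.0.0.1 remote 10.0.0.2 \
 *		seq key 100 erspan_ver 1 erspan 123
 * which satisfies the GRE_SEQ | GRE_KEY requirement enforced by
 * erspan_validate() above.
 */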

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");