/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), the counter is reliable. We force an exit if it reaches
   RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work, or traceroute to say something reasonable,
   we should search for another solution.

   One of them is to parse the packet, trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially
   taking fragmentation into account. To be short, ttl is not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us;
   we did all that we could. Even if it was your gated that injected
   the fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
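
/* For illustration only: a minimal sketch of the percpu recursion guard
 * described above. The real counter and limit live in the core transmit
 * path (net/core/dev.c); the surrounding variable names here are
 * illustrative assumptions, not this file's API.
 *
 *	static DEFINE_PER_CPU(unsigned int, xmit_recursion);
 *
 *	if (unlikely(__this_cpu_read(xmit_recursion) > RECURSION_LIMIT))
 *		goto drop;
 *	__this_cpu_inc(xmit_recursion);
 *	skb = dev_hard_start_xmit(skb, dev, txq, &rc);
 *	__this_cpu_dec(xmit_recursion);
 *
 * Dropping at the limit turns a local dead loop into a bounded number of
 * nested transmits instead of a stack overflow.
 */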

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
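
/* Because the parameter above is registered with mode 0644, the flag can
 * be flipped at runtime through sysfs (path assumes the driver is built
 * as the ip_gre module):
 *
 *	echo 0 > /sys/module/ip_gre/parameters/log_ecn_error
 */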

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;

static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco's "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksums enabled. Tell
	   them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee,
	   so why the hell do these idiots break the standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* See the lengthy comment in ipgre_err() above: only Linux returns
	 * more than 8 bytes of ICMP payload, and Cisco's GRE key placement
	 * makes keyed+checksummed soft state impossible to maintain.
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		/* Bail only on real parse errors. A checksum failure is
		 * expected here, since the ICMP payload may be truncated,
		 * so csum errors are ignored and processing continues.
		 */
		if (!csum_err)
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
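
/* For reference, gre_parse_header() above walks the on-wire GRE header,
 * whose fixed part is (see include/net/gre.h):
 *
 *	struct gre_base_hdr {
 *		__be16 flags;
 *		__be16 protocol;
 *	};
 *
 * optionally followed by 32-bit checksum+reserved, key and sequence
 * words, depending on which of GRE_CSUM/GRE_KEY/GRE_SEQ are set in
 * flags. The hdr_len it returns accounts for exactly those words.
 */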

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, &fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl.saddr);
	}

	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	int err;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}
	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}
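
/* A hedged userspace sketch of driving the ioctl above (this is the path
 * "ip tunnel add" takes; error handling is omitted, the addresses and
 * names are made up, and the usual <linux/if_tunnel.h>/<net/if.h>
 * headers are assumed). The request is sent to the fallback gre0 device:
 *
 *	struct ip_tunnel_parm p = { 0 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	p.iph.version  = 4;
 *	p.iph.ihl      = 5;
 *	p.iph.protocol = IPPROTO_GRE;
 *	p.iph.saddr    = inet_addr("10.0.0.1");
 *	p.iph.daddr    = inet_addr("10.0.0.2");
 *	strncpy(p.name, "gre1", IFNAMSIZ);
 *	strncpy(ifr.ifr_name, "gre0", IFNAMSIZ);
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */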

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
	dev->mtu = ETH_DATA_LEN - t_hlen - 4;

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
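
/* Worked example of the header math above, assuming a plain IPv4 GRE
 * tunnel with TUNNEL_KEY | TUNNEL_CSUM set and no extra encapsulation:
 *
 *	tun_hlen = 4 (base GRE) + 4 (csum) + 4 (key) = 12
 *	t_hlen   = 12 + 20 (outer iphdr)             = 32
 *	dev->mtu = 1500 (ETH_DATA_LEN) - 32 - 4      = 1464
 */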

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

	ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
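
/* The ENCAP attributes above are what iproute2 fills in when a GRE tunnel
 * is wrapped in FOU/GUE; a hedged usage sketch (addresses and the port
 * are made up for illustration, and a matching FOU receive port must be
 * configured first):
 *
 *	ip fou add port 5555 ipproto 47
 *	ip link add gre1 type gre remote 10.0.0.2 local 10.0.0.1 \
 *		encap fou encap-sport auto encap-dport 5555
 */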

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
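
/* These two ops structs back the "gre" and "gretap" rtnl link kinds,
 * e.g. (addresses and key values are illustrative):
 *
 *	ip link add gre1 type gre remote 10.0.0.2 local 10.0.0.1 ttl 64
 *	ip link add tap1 type gretap remote 10.0.0.2 local 10.0.0.1 key 42
 *	ip link add gre2 type gretap external	(collect_md/flow-based mode)
 */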

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
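
/* A hedged caller sketch (openvswitch is the in-tree user; the device
 * name below is illustrative, and RTNL must be held around the rtnl_*
 * calls this function makes):
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = gretap_fb_dev_create(net, "gretap_sys", NET_NAME_USER);
 *	rtnl_unlock();
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */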

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");