net/l2tp/l2tp_ip.c
/*
 * L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include "l2tp_core.h"

struct l2tp_ip_sock {
        /* inet_sock has to be the first member of l2tp_ip_sock */
        struct inet_sock inet;

        u32 conn_id;
        u32 peer_conn_id;
};

static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;

static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
        return (struct l2tp_ip_sock *)sk;
}

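/* Search the bind table for a matching socket. A zero local address, remote
 * address or interface index on either side acts as a wildcard; the tunnel
 * (connection) id must match exactly. Callers in this file hold
 * l2tp_ip_lock around the lookup.
 */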
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
                                          __be32 raddr, int dif, u32 tunnel_id)
{
        struct sock *sk;

        sk_for_each_bound(sk, &l2tp_ip_bind_table) {
                const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
                const struct inet_sock *inet = inet_sk(sk);

                if (!net_eq(sock_net(sk), net))
                        continue;

                if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
                        continue;

                if (inet->inet_rcv_saddr && laddr &&
                    inet->inet_rcv_saddr != laddr)
                        continue;

                if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
                        continue;

                if (l2tp->conn_id != tunnel_id)
                        continue;

                goto found;
        }

        sk = NULL;
found:
        return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip_recv(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
        unsigned char *ptr, *optr;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel = NULL;
        int length;

        if (!pskb_may_pull(skb, 4))
                goto discard;

        /* Point to L2TP header */
        optr = ptr = skb->data;
        session_id = ntohl(*((__be32 *) ptr));
        ptr += 4;

        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
         * the session_id. If it is 0, the packet is an L2TP control
         * frame and the session_id value can be discarded.
         */
        if (session_id == 0) {
                __skb_pull(skb, 4);
                goto pass_up;
        }

        /* Ok, this is a data packet. Lookup the session. */
        session = l2tp_session_find(net, NULL, session_id);
        if (session == NULL)
                goto discard;

        tunnel = session->tunnel;
        if (tunnel == NULL)
                goto discard;

        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
                        goto discard;

                /* Point to L2TP header */
                optr = ptr = skb->data;
                ptr += 4;
                pr_debug("%s: ip recv\n", tunnel->name);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }

        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);

        return 0;

pass_up:
        /* Get the tunnel_id from the L2TP header */
        if (!pskb_may_pull(skb, 12))
                goto discard;

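        /* A valid control message must have both the T (control) and L
         * (length present) bits set in the first byte of the L2TP header.
         */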
        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;

        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
        tunnel = l2tp_tunnel_find(net, tunnel_id);
        if (tunnel != NULL)
                sk = tunnel->sock;
        else {
                struct iphdr *iph = (struct iphdr *) skb_network_header(skb);

                read_lock_bh(&l2tp_ip_lock);
                sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
                                           inet_iif(skb), tunnel_id);
                if (!sk) {
                        read_unlock_bh(&l2tp_ip_lock);
                        goto discard;
                }

                sock_hold(sk);
                read_unlock_bh(&l2tp_ip_lock);
        }

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;

        nf_reset(skb);

        return sk_receive_skb(sk, skb, 1);

discard_put:
        sock_put(sk);

discard:
        kfree_skb(skb);
        return 0;
}

static int l2tp_ip_open(struct sock *sk)
{
        /* Prevent autobind. We don't have ports. */
        inet_sk(sk)->inet_num = IPPROTO_L2TP;

        write_lock_bh(&l2tp_ip_lock);
        sk_add_node(sk, &l2tp_ip_table);
        write_unlock_bh(&l2tp_ip_lock);

        return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
        sk_common_release(sk);
}

static void l2tp_ip_destroy_sock(struct sock *sk)
{
        struct sk_buff *skb;
        struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        if (tunnel) {
                l2tp_tunnel_closeall(tunnel);
                sock_put(sk);
        }

        sk_refcnt_debug_dec(sk);
}

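/* SOCK_ZAPPED tracks whether the socket has been bound: l2tp_ip_bind()
 * requires it to still be set and clears it on success, l2tp_ip_connect()
 * refuses to run while it is set (bind first; autobind is not supported),
 * and l2tp_ip_disconnect() is a no-op for a still-zapped socket.
 */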
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
        struct net *net = sock_net(sk);
        int ret;
        int chk_addr_ret;

        if (addr_len < sizeof(struct sockaddr_l2tpip))
                return -EINVAL;
        if (addr->l2tp_family != AF_INET)
                return -EINVAL;

        lock_sock(sk);

        ret = -EINVAL;
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;

        if (sk->sk_state != TCP_CLOSE)
                goto out;

        chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
        ret = -EADDRNOTAVAIL;
        if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
            chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
                goto out;

        if (addr->l2tp_addr.s_addr)
                inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */

        write_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
                                  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
                write_unlock_bh(&l2tp_ip_lock);
                ret = -EADDRINUSE;
                goto out;
        }

        sk_dst_reset(sk);
        l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);

        ret = 0;
        sock_reset_flag(sk, SOCK_ZAPPED);

out:
        release_sock(sk);

        return ret;
}

static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
        int rc;

        if (addr_len < sizeof(*lsa))
                return -EINVAL;

        if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
                return -EINVAL;

        lock_sock(sk);

        /* Must bind first - autobinding does not work */
        if (sock_flag(sk, SOCK_ZAPPED)) {
                rc = -EINVAL;
                goto out_sk;
        }

        rc = __ip4_datagram_connect(sk, uaddr, addr_len);
        if (rc < 0)
                goto out_sk;

        l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        write_unlock_bh(&l2tp_ip_lock);

out_sk:
        release_sock(sk);

        return rc;
}

static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
        if (sock_flag(sk, SOCK_ZAPPED))
                return 0;

        return __udp_disconnect(sk, flags);
}

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int *uaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

        memset(lsa, 0, sizeof(*lsa));
        lsa->l2tp_family = AF_INET;
        if (peer) {
                if (!inet->inet_dport)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
                lsa->l2tp_addr.s_addr = inet->inet_daddr;
        } else {
                __be32 addr = inet->inet_rcv_saddr;
                if (!addr)
                        addr = inet->inet_saddr;
                lsa->l2tp_conn_id = lsk->conn_id;
                lsa->l2tp_addr.s_addr = addr;
        }
        *uaddr_len = sizeof(*lsa);
        return 0;
}

static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
                goto drop;

        return 0;

drop:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return 0;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct sk_buff *skb;
        int rc;
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = NULL;
        struct flowi4 *fl4;
        int connected = 0;
        __be32 daddr;

        lock_sock(sk);

        rc = -ENOTCONN;
        if (sock_flag(sk, SOCK_DEAD))
                goto out;

        /* Get and verify the address. */
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
                rc = -EINVAL;
                if (msg->msg_namelen < sizeof(*lip))
                        goto out;

                if (lip->l2tp_family != AF_INET) {
                        rc = -EAFNOSUPPORT;
                        if (lip->l2tp_family != AF_UNSPEC)
                                goto out;
                }

                daddr = lip->l2tp_addr.s_addr;
        } else {
                rc = -EDESTADDRREQ;
                if (sk->sk_state != TCP_ESTABLISHED)
                        goto out;

                daddr = inet->inet_daddr;
                connected = 1;
        }

        /* Allocate a socket buffer */
        rc = -ENOMEM;
        skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
                           4 + len, 0, GFP_KERNEL);
        if (!skb)
                goto error;

        /* Reserve space for headers, putting IP header on 4-byte boundary. */
        skb_reserve(skb, 2 + NET_SKB_PAD);
        skb_reset_network_header(skb);
        skb_reserve(skb, sizeof(struct iphdr));
        skb_reset_transport_header(skb);

        /* Insert 0 session_id */
        *((__be32 *) skb_put(skb, 4)) = 0;

        /* Copy user data into skb */
        rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc < 0) {
                kfree_skb(skb);
                goto error;
        }

        fl4 = &inet->cork.fl.u.ip4;
        if (connected)
                rt = (struct rtable *) __sk_dst_check(sk, 0);

        rcu_read_lock();
        if (rt == NULL) {
                const struct ip_options_rcu *inet_opt;

                inet_opt = rcu_dereference(inet->inet_opt);

                /* Use correct destination address if we have options. */
                if (inet_opt && inet_opt->opt.srr)
                        daddr = inet_opt->opt.faddr;

                /* If this fails, retransmit mechanism of transport layer will
                 * keep trying until route appears or the connection times
                 * itself out.
                 */
                rt = ip_route_output_ports(sock_net(sk), fl4, sk,
                                           daddr, inet->inet_saddr,
                                           inet->inet_dport, inet->inet_sport,
                                           sk->sk_protocol, RT_CONN_FLAGS(sk),
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
                if (connected) {
                        sk_setup_caps(sk, &rt->dst);
                } else {
                        skb_dst_set(skb, &rt->dst);
                        goto xmit;
                }
        }

        /* We don't need to clone dst here, it is guaranteed to not disappear.
         * __dev_xmit_skb() might force a refcount if needed.
         */
        skb_dst_set_noref(skb, &rt->dst);

xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
        rcu_read_unlock();

error:
        if (rc >= 0)
                rc = len;

out:
        release_sock(sk);
        return rc;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        rc = -EHOSTUNREACH;
        goto out;
}

static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
                           size_t len, int noblock, int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto done;

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        return err ? err : copied;
}

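/* SIOCOUTQ reports the number of bytes queued for transmit on the socket;
 * SIOCINQ reports the length of the first datagram waiting in the receive
 * queue (0 if the queue is empty).
 */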
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct sk_buff *skb;
        int amount;

        switch (cmd) {
        case SIOCOUTQ:
                amount = sk_wmem_alloc_get(sk);
                break;
        case SIOCINQ:
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                amount = skb ? skb->len : 0;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                break;

        default:
                return -ENOIOCTLCMD;
        }

        return put_user(amount, (int __user *)arg);
}
EXPORT_SYMBOL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
        .name              = "L2TP/IP",
        .owner             = THIS_MODULE,
        .init              = l2tp_ip_open,
        .close             = l2tp_ip_close,
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
        .disconnect        = l2tp_ip_disconnect,
        .ioctl             = l2tp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
        .sendmsg           = l2tp_ip_sendmsg,
        .recvmsg           = l2tp_ip_recvmsg,
        .backlog_rcv       = l2tp_ip_backlog_recv,
        .hash              = inet_hash,
        .unhash            = inet_unhash,
        .obj_size          = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
#endif
};

static const struct proto_ops l2tp_ip_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
        .release           = inet_release,
        .bind              = inet_bind,
        .connect           = inet_dgram_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip_getname,
        .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw l2tp_ip_protosw = {
        .type     = SOCK_DGRAM,
        .protocol = IPPROTO_L2TP,
        .prot     = &l2tp_ip_prot,
        .ops      = &l2tp_ip_ops,
};

static struct net_protocol l2tp_ip_protocol __read_mostly = {
        .handler  = l2tp_ip_recv,
        .netns_ok = 1,
};

static int __init l2tp_ip_init(void)
{
        int err;

        pr_info("L2TP IP encapsulation support (L2TPv3)\n");

        err = proto_register(&l2tp_ip_prot, 1);
        if (err != 0)
                goto out;

        err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        if (err)
                goto out1;

        inet_register_protosw(&l2tp_ip_protosw);
        return 0;

out1:
        proto_unregister(&l2tp_ip_prot);
out:
        return err;
}

static void __exit l2tp_ip_exit(void)
{
        inet_unregister_protosw(&l2tp_ip_protosw);
        inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        proto_unregister(&l2tp_ip_prot);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
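
/* Illustrative userspace usage (a sketch, not part of the kernel sources):
 * an L2TP/IP control socket is a SOCK_DGRAM/IPPROTO_L2TP inet socket that
 * must be bound (with a struct sockaddr_l2tpip from <linux/l2tp.h>) before
 * it can be connected; the addresses and connection ids below are example
 * values only.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *
 *	struct sockaddr_l2tpip local = {
 *		.l2tp_family  = AF_INET,
 *		.l2tp_addr    = { .s_addr = htonl(INADDR_ANY) },
 *		.l2tp_conn_id = 1,	// local tunnel id (example)
 *	};
 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
 *
 *	struct sockaddr_l2tpip peer = {
 *		.l2tp_family  = AF_INET,
 *		.l2tp_addr    = { .s_addr = inet_addr("192.0.2.1") },
 *		.l2tp_conn_id = 2,	// peer tunnel id (example)
 *	};
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * After this, sendmsg()/recvmsg() on fd carry L2TPv3 control frames;
 * l2tp_ip_sendmsg() above prefixes each transmitted frame with the 4-byte
 * zero session id, and l2tp_ip_recv() strips it before passing control
 * frames up to the socket.
 */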