net/l2tp/l2tp_ip.c (mirror_ubuntu-zesty-kernel.git)
l2tp: hold tunnel socket when handling control frames in l2tp_ip and l2tp_ip6
/*
 * L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include "l2tp_core.h"

struct l2tp_ip_sock {
        /* inet_sock has to be the first member of l2tp_ip_sock */
        struct inet_sock inet;

        u32 conn_id;
        u32 peer_conn_id;
};

static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;

static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
        return (struct l2tp_ip_sock *)sk;
}

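/* Scan the bind table for a socket that can accept this packet: the
 * tunnel connection ID must match, the socket must live in the same
 * network namespace, and any bound local address, connected peer
 * address or bound device must either be unset or match the values
 * supplied by the caller.
 */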
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
                                          __be32 raddr, int dif, u32 tunnel_id)
{
        struct sock *sk;

        sk_for_each_bound(sk, &l2tp_ip_bind_table) {
                struct inet_sock *inet = inet_sk(sk);
                struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);

                if (l2tp == NULL)
                        continue;

                if ((l2tp->conn_id == tunnel_id) &&
                    net_eq(sock_net(sk), net) &&
                    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
                    (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
                    (!sk->sk_bound_dev_if || !dif ||
                     sk->sk_bound_dev_if == dif))
                        goto found;
        }

        sk = NULL;
found:
        return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip_recv(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
        unsigned char *ptr, *optr;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel = NULL;
        int length;

        if (!pskb_may_pull(skb, 4))
                goto discard;

        /* Point to L2TP header */
        optr = ptr = skb->data;
        session_id = ntohl(*((__be32 *) ptr));
        ptr += 4;

        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
         * the session_id. If it is 0, the packet is a L2TP control
         * frame and the session_id value can be discarded.
         */
        if (session_id == 0) {
                __skb_pull(skb, 4);
                goto pass_up;
        }

        /* Ok, this is a data packet. Lookup the session. */
        session = l2tp_session_find(net, NULL, session_id);
        if (session == NULL)
                goto discard;

        tunnel = session->tunnel;
        if (tunnel == NULL)
                goto discard;

        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
                        goto discard;

                /* Point to L2TP header */
                optr = ptr = skb->data;
                ptr += 4;
                pr_debug("%s: ip recv\n", tunnel->name);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }

        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);

        return 0;

pass_up:
        /* Get the tunnel_id from the L2TP header */
        if (!pskb_may_pull(skb, 12))
                goto discard;

        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;

        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
        tunnel = l2tp_tunnel_find(net, tunnel_id);
        if (tunnel) {
                sk = tunnel->sock;
                sock_hold(sk);
        } else {
                struct iphdr *iph = (struct iphdr *) skb_network_header(skb);

                read_lock_bh(&l2tp_ip_lock);
                sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
                                           inet_iif(skb), tunnel_id);
                if (!sk) {
                        read_unlock_bh(&l2tp_ip_lock);
                        goto discard;
                }

                sock_hold(sk);
                read_unlock_bh(&l2tp_ip_lock);
        }

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;

        nf_reset(skb);

        return sk_receive_skb(sk, skb, 1);

discard_put:
        sock_put(sk);

discard:
        kfree_skb(skb);
        return 0;
}

static int l2tp_ip_open(struct sock *sk)
{
        /* Prevent autobind. We don't have ports. */
        inet_sk(sk)->inet_num = IPPROTO_L2TP;

        write_lock_bh(&l2tp_ip_lock);
        sk_add_node(sk, &l2tp_ip_table);
        write_unlock_bh(&l2tp_ip_lock);

        return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
        sk_common_release(sk);
}

static void l2tp_ip_destroy_sock(struct sock *sk)
{
        struct sk_buff *skb;
        struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        if (tunnel) {
                l2tp_tunnel_closeall(tunnel);
                sock_put(sk);
        }

        sk_refcnt_debug_dec(sk);
}

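/* bind() records the local address and local L2TP connection ID.
 * The address must be local, multicast or broadcast, and no other
 * socket may already be bound to a conflicting address/device/
 * connection ID. On success the socket moves from the unbound table
 * to the bind table (where l2tp_ip_recv() looks up control frames)
 * and SOCK_ZAPPED is cleared to mark it as bound.
 */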
static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
        struct net *net = sock_net(sk);
        int ret;
        int chk_addr_ret;

        if (addr_len < sizeof(struct sockaddr_l2tpip))
                return -EINVAL;
        if (addr->l2tp_family != AF_INET)
                return -EINVAL;

        lock_sock(sk);

        ret = -EINVAL;
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;

        if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
                goto out;

        chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
        ret = -EADDRNOTAVAIL;
        if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
            chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
                goto out;

        if (addr->l2tp_addr.s_addr)
                inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */

        write_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
                                  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
                write_unlock_bh(&l2tp_ip_lock);
                ret = -EADDRINUSE;
                goto out;
        }

        sk_dst_reset(sk);
        l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);

        ret = 0;
        sock_reset_flag(sk, SOCK_ZAPPED);

out:
        release_sock(sk);

        return ret;
}

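/* connect() can only follow a successful bind() (there is no autobind,
 * so SOCK_ZAPPED must already be clear). It performs a normal IPv4
 * datagram connect, records the peer's connection ID and re-adds the
 * socket to the bind table so lookups also match on the now-known
 * peer address.
 */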
static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
        int rc;

        if (addr_len < sizeof(*lsa))
                return -EINVAL;

        if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
                return -EINVAL;

        lock_sock(sk);

        /* Must bind first - autobinding does not work */
        if (sock_flag(sk, SOCK_ZAPPED)) {
                rc = -EINVAL;
                goto out_sk;
        }

        rc = __ip4_datagram_connect(sk, uaddr, addr_len);
        if (rc < 0)
                goto out_sk;

        l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

        write_lock_bh(&l2tp_ip_lock);
        hlist_del_init(&sk->sk_bind_node);
        sk_add_bind_node(sk, &l2tp_ip_bind_table);
        write_unlock_bh(&l2tp_ip_lock);

out_sk:
        release_sock(sk);

        return rc;
}

static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
        if (sock_flag(sk, SOCK_ZAPPED))
                return 0;

        return __udp_disconnect(sk, flags);
}

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                           int *uaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
        struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

        memset(lsa, 0, sizeof(*lsa));
        lsa->l2tp_family = AF_INET;
        if (peer) {
                if (!inet->inet_dport)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
                lsa->l2tp_addr.s_addr = inet->inet_daddr;
        } else {
                __be32 addr = inet->inet_rcv_saddr;
                if (!addr)
                        addr = inet->inet_saddr;
                lsa->l2tp_conn_id = lsk->conn_id;
                lsa->l2tp_addr.s_addr = addr;
        }
        *uaddr_len = sizeof(*lsa);
        return 0;
}

static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
                goto drop;

        return 0;

drop:
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return 0;
}

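/* A rough userspace sketch of how these tunnel sockets are used (the
 * connection IDs and addresses below are made-up example values, not
 * anything defined by this file):
 *
 *        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *
 *        struct sockaddr_l2tpip local = {
 *                .l2tp_family = AF_INET,
 *                .l2tp_addr.s_addr = htonl(INADDR_ANY),
 *                .l2tp_conn_id = 42,        // local control connection ID
 *        };
 *        bind(fd, (struct sockaddr *)&local, sizeof(local));
 *
 *        struct sockaddr_l2tpip peer = {
 *                .l2tp_family = AF_INET,
 *                .l2tp_addr.s_addr = inet_addr("192.0.2.1"),
 *                .l2tp_conn_id = 43,        // peer's control connection ID
 *        };
 *        connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * After this, each sendmsg()/recvmsg() on fd carries one whole L2TPv3
 * control message; the kernel adds/strips the leading zero session ID.
 */
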
/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct sk_buff *skb;
        int rc;
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = NULL;
        struct flowi4 *fl4;
        int connected = 0;
        __be32 daddr;

        lock_sock(sk);

        rc = -ENOTCONN;
        if (sock_flag(sk, SOCK_DEAD))
                goto out;

        /* Get and verify the address. */
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
                rc = -EINVAL;
                if (msg->msg_namelen < sizeof(*lip))
                        goto out;

                if (lip->l2tp_family != AF_INET) {
                        rc = -EAFNOSUPPORT;
                        if (lip->l2tp_family != AF_UNSPEC)
                                goto out;
                }

                daddr = lip->l2tp_addr.s_addr;
        } else {
                rc = -EDESTADDRREQ;
                if (sk->sk_state != TCP_ESTABLISHED)
                        goto out;

                daddr = inet->inet_daddr;
                connected = 1;
        }

        /* Allocate a socket buffer */
        rc = -ENOMEM;
        skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
                           4 + len, 0, GFP_KERNEL);
        if (!skb)
                goto error;

        /* Reserve space for headers, putting IP header on 4-byte boundary. */
        skb_reserve(skb, 2 + NET_SKB_PAD);
        skb_reset_network_header(skb);
        skb_reserve(skb, sizeof(struct iphdr));
        skb_reset_transport_header(skb);

        /* Insert 0 session_id */
        *((__be32 *) skb_put(skb, 4)) = 0;

        /* Copy user data into skb */
        rc = memcpy_from_msg(skb_put(skb, len), msg, len);
        if (rc < 0) {
                kfree_skb(skb);
                goto error;
        }

        fl4 = &inet->cork.fl.u.ip4;
        if (connected)
                rt = (struct rtable *) __sk_dst_check(sk, 0);

        rcu_read_lock();
        if (rt == NULL) {
                const struct ip_options_rcu *inet_opt;

                inet_opt = rcu_dereference(inet->inet_opt);

                /* Use correct destination address if we have options. */
                if (inet_opt && inet_opt->opt.srr)
                        daddr = inet_opt->opt.faddr;

                /* If this fails, retransmit mechanism of transport layer will
                 * keep trying until route appears or the connection times
                 * itself out.
                 */
                rt = ip_route_output_ports(sock_net(sk), fl4, sk,
                                           daddr, inet->inet_saddr,
                                           inet->inet_dport, inet->inet_sport,
                                           sk->sk_protocol, RT_CONN_FLAGS(sk),
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
                if (connected) {
                        sk_setup_caps(sk, &rt->dst);
                } else {
                        skb_dst_set(skb, &rt->dst);
                        goto xmit;
                }
        }

        /* We don't need to clone dst here, it is guaranteed to not disappear.
         * __dev_xmit_skb() might force a refcount if needed.
         */
        skb_dst_set_noref(skb, &rt->dst);

xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
        rcu_read_unlock();

error:
        if (rc >= 0)
                rc = len;

out:
        release_sock(sk);
        return rc;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        rc = -EHOSTUNREACH;
        goto out;
}

static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
                           size_t len, int noblock, int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto done;

        sock_recv_timestamp(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        return err ? err : copied;
}

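/* SIOCOUTQ reports the number of bytes queued for transmission on the
 * socket; SIOCINQ reports the size of the next pending datagram (0 if
 * the receive queue is empty). Exported so l2tp_ip6 can use it too.
 */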
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct sk_buff *skb;
        int amount;

        switch (cmd) {
        case SIOCOUTQ:
                amount = sk_wmem_alloc_get(sk);
                break;
        case SIOCINQ:
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                amount = skb ? skb->len : 0;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                break;

        default:
                return -ENOIOCTLCMD;
        }

        return put_user(amount, (int __user *)arg);
}
EXPORT_SYMBOL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
        .name = "L2TP/IP",
        .owner = THIS_MODULE,
        .init = l2tp_ip_open,
        .close = l2tp_ip_close,
        .bind = l2tp_ip_bind,
        .connect = l2tp_ip_connect,
        .disconnect = l2tp_ip_disconnect,
        .ioctl = l2tp_ioctl,
        .destroy = l2tp_ip_destroy_sock,
        .setsockopt = ip_setsockopt,
        .getsockopt = ip_getsockopt,
        .sendmsg = l2tp_ip_sendmsg,
        .recvmsg = l2tp_ip_recvmsg,
        .backlog_rcv = l2tp_ip_backlog_recv,
        .hash = inet_hash,
        .unhash = inet_unhash,
        .obj_size = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
#endif
};

static const struct proto_ops l2tp_ip_ops = {
        .family = PF_INET,
        .owner = THIS_MODULE,
        .release = inet_release,
        .bind = inet_bind,
        .connect = inet_dgram_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = l2tp_ip_getname,
        .poll = datagram_poll,
        .ioctl = inet_ioctl,
        .listen = sock_no_listen,
        .shutdown = inet_shutdown,
        .setsockopt = sock_common_setsockopt,
        .getsockopt = sock_common_getsockopt,
        .sendmsg = inet_sendmsg,
        .recvmsg = sock_common_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw l2tp_ip_protosw = {
        .type = SOCK_DGRAM,
        .protocol = IPPROTO_L2TP,
        .prot = &l2tp_ip_prot,
        .ops = &l2tp_ip_ops,
};

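/* Registering the protosw above makes socket(AF_INET, SOCK_DGRAM,
 * IPPROTO_L2TP) resolve to these ops, while the net_protocol handler
 * below receives all inbound IP protocol 115 (L2TP) packets.
 */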
static struct net_protocol l2tp_ip_protocol __read_mostly = {
        .handler = l2tp_ip_recv,
        .netns_ok = 1,
};

static int __init l2tp_ip_init(void)
{
        int err;

        pr_info("L2TP IP encapsulation support (L2TPv3)\n");

        err = proto_register(&l2tp_ip_prot, 1);
        if (err != 0)
                goto out;

        err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        if (err)
                goto out1;

        inet_register_protosw(&l2tp_ip_protosw);
        return 0;

out1:
        proto_unregister(&l2tp_ip_prot);
out:
        return err;
}

static void __exit l2tp_ip_exit(void)
{
        inet_unregister_protosw(&l2tp_ip_protosw);
        inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
        proto_unregister(&l2tp_ip_prot);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);