drivers/net/ppp/pptp.c
/*
 *  Point-to-Point Tunneling Protocol for Linux
 *
 *	Authors: Dmitry Kozlov <xeb@mail.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_pppox.h>
#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

#include <net/sock.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/gre.h>
#include <net/pptp.h>

#include <linux/uaccess.h>

#define PPTP_DRIVER_VERSION "0.8.5"

#define MAX_CALLID 65535

static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
static struct pppox_sock __rcu **callid_sock;

static DEFINE_SPINLOCK(chan_lock);

static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;

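/* Find the pppox socket registered for @call_id and verify that the
 * packet's source address matches the channel's peer; takes a socket
 * reference under rcu_read_lock().
 */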
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;

	rcu_read_lock();
	sock = rcu_dereference(callid_sock[call_id]);
	if (sock) {
		opt = &sock->proto.pptp;
		if (opt->dst_addr.sin_addr.s_addr != s_addr)
			sock = NULL;
		else
			sock_hold(sk_pppox(sock));
	}
	rcu_read_unlock();

	return sock;
}

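/* Return true if some bound channel already targets the peer
 * identified by @call_id and @d_addr; pptp_connect() uses this to
 * reject duplicate connections with -EALREADY.
 */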
static int lookup_chan_dst(u16 call_id, __be32 d_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;
	int i;

	rcu_read_lock();
	i = 1;
	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
		sock = rcu_dereference(callid_sock[i]);
		if (!sock)
			continue;
		opt = &sock->proto.pptp;
		if (opt->dst_addr.call_id == call_id &&
		    opt->dst_addr.sin_addr.s_addr == d_addr)
			break;
	}
	rcu_read_unlock();

	return i < MAX_CALLID;
}

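/* Register a socket in callid_sock[]. If no call ID was requested,
 * pick the next free one from callid_bitmap (wrapping around once);
 * otherwise fail if the requested ID is already taken.
 */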
static int add_chan(struct pppox_sock *sock,
		    struct pptp_addr *sa)
{
	static int call_id;

	spin_lock(&chan_lock);
	if (!sa->call_id) {
		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
		if (call_id == MAX_CALLID) {
			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
			if (call_id == MAX_CALLID)
				goto out_err;
		}
		sa->call_id = call_id;
	} else if (test_bit(sa->call_id, callid_bitmap)) {
		goto out_err;
	}

	sock->proto.pptp.src_addr = *sa;
	set_bit(sa->call_id, callid_bitmap);
	rcu_assign_pointer(callid_sock[sa->call_id], sock);
	spin_unlock(&chan_lock);

	return 0;

out_err:
	spin_unlock(&chan_lock);
	return -1;
}

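/* Unregister a socket: release its call ID and clear its
 * callid_sock[] slot. RCU readers may still hold the pointer until
 * the grace period in pptp_release() has passed.
 */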
static void del_chan(struct pppox_sock *sock)
{
	spin_lock(&chan_lock);
	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
	spin_unlock(&chan_lock);
}

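/* ppp_channel start_xmit hook: route to the peer, prepend the PPP
 * address/control and protocol bytes as needed, then the GRE v1
 * header (piggybacking an ACK when one is pending) and the outer
 * IPv4 header, and hand the packet to ip_local_out().
 */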
static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct net *net = sock_net(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct pptp_gre_header *hdr;
	unsigned int header_len = sizeof(*hdr);
	struct flowi4 fl4;
	int islcp;
	int len;
	unsigned char *data;
	__u32 seq_recv;

	struct rtable *rt;
	struct net_device *tdev;
	struct iphdr *iph;
	int max_headroom;

	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
		goto tx_error;

	rt = ip_route_output_ports(net, &fl4, NULL,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0, IPPROTO_GRE,
				   RT_TOS(0), 0);
	if (IS_ERR(rt))
		goto tx_error;

	tdev = rt->dst.dev;

	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	data = skb->data;
	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field */
	if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
		skb_pull(skb, 1);

	/* Put in the address/control bytes if necessary */
	if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
		data = skb_push(skb, 2);
		data[0] = PPP_ALLSTATIONS;
		data[1] = PPP_UI;
	}

	len = skb->len;

	seq_recv = opt->seq_recv;

	if (opt->ack_sent == seq_recv)
		header_len -= sizeof(hdr->ack);

	/* Push down and install GRE header */
	skb_push(skb, header_len);
	hdr = (struct pptp_gre_header *)(skb->data);

	hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
	hdr->gre_hd.protocol = GRE_PROTO_PPP;
	hdr->call_id = htons(opt->dst_addr.call_id);

	hdr->seq = htonl(++opt->seq_sent);
	if (opt->ack_sent != seq_recv) {
		/* send ack with this message */
		hdr->gre_hd.flags |= GRE_ACK;
		hdr->ack = htonl(seq_recv);
		opt->ack_sent = seq_recv;
	}
	hdr->payload_len = htons(len);

	/* Push down and install the IP header. */

	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->protocol = IPPROTO_GRE;
	iph->tos = 0;
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->tot_len = htons(skb->len);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	nf_reset(skb);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	ip_local_out(net, skb->sk, skb);
	return 1;

tx_error:
	kfree_skb(skb);
	return 1;
}

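/* Process a queued GRE packet in socket context: record any ACK the
 * peer sent, enforce in-order sequence numbers (still letting LCP
 * echo request/reply through), strip the GRE and PPP framing and
 * pass the payload up via ppp_input().
 */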
static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
{
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	int headersize, payload_len, seq;
	__u8 *payload;
	struct pptp_gre_header *header;

	if (!(sk->sk_state & PPPOX_CONNECTED)) {
		if (sock_queue_rcv_skb(sk, skb))
			goto drop;
		return NET_RX_SUCCESS;
	}

	header = (struct pptp_gre_header *)(skb->data);
	headersize = sizeof(*header);

	/* test if acknowledgement present */
	if (GRE_IS_ACK(header->gre_hd.flags)) {
		__u32 ack;

		if (!pskb_may_pull(skb, headersize))
			goto drop;
		header = (struct pptp_gre_header *)(skb->data);

		/* ack in different place if S = 0 */
		ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;

		ack = ntohl(ack);

		if (ack > opt->ack_recv)
			opt->ack_recv = ack;
		/* also handle sequence number wrap-around */
		if (WRAPPED(ack, opt->ack_recv))
			opt->ack_recv = ack;
	} else {
		headersize -= sizeof(header->ack);
	}
	/* test if payload present */
	if (!GRE_IS_SEQ(header->gre_hd.flags))
		goto drop;

	payload_len = ntohs(header->payload_len);
	seq = ntohl(header->seq);

	/* check for incomplete packet (length smaller than expected) */
	if (!pskb_may_pull(skb, headersize + payload_len))
		goto drop;

	payload = skb->data + headersize;
	/* check for expected sequence number */
	if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
		if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
		    (PPP_PROTOCOL(payload) == PPP_LCP) &&
		    ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
			goto allow_packet;
	} else {
		opt->seq_recv = seq;
allow_packet:
		skb_pull(skb, headersize);

		if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
			/* chop off address/control */
			if (skb->len < 3)
				goto drop;
			skb_pull(skb, 2);
		}

		if ((*skb->data) & 1) {
			/* protocol is compressed */
			*(u8 *)skb_push(skb, 1) = 0;
		}

		skb->ip_summed = CHECKSUM_NONE;
		skb_set_network_header(skb, skb->head - skb->data);
		ppp_input(&po->chan, skb);

		return NET_RX_SUCCESS;
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

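/* GRE protocol handler: sanity-check the GRE version 1 header, look
 * up the matching channel by call ID and source address, and queue
 * the packet to that socket.
 */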
static int pptp_rcv(struct sk_buff *skb)
{
	struct pppox_sock *po;
	struct pptp_gre_header *header;
	struct iphdr *iph;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (!pskb_may_pull(skb, 12))
		goto drop;

	iph = ip_hdr(skb);

	header = (struct pptp_gre_header *)skb->data;

	if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
	    GRE_IS_CSUM(header->gre_hd.flags) ||        /* flag CSUM should be clear */
	    GRE_IS_ROUTING(header->gre_hd.flags) ||     /* flag ROUTING should be clear */
	    !GRE_IS_KEY(header->gre_hd.flags) ||        /* flag KEY should be set */
	    (header->gre_hd.flags & GRE_FLAGS))         /* flag Recursion Ctrl should be clear */
		/* if invalid, discard this packet */
		goto drop;

	po = lookup_chan(htons(header->call_id), iph->saddr);
	if (po) {
		skb_dst_drop(skb);
		nf_reset(skb);
		return sk_receive_skb(sk_pppox(po), skb, 0);
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

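/* bind(): reserve a local call ID for this socket via add_chan(). */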
static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto out;
	}

	if (sk->sk_state & PPPOX_BOUND) {
		error = -EBUSY;
		goto out;
	}

	if (add_chan(po, &sp->sa_addr.pptp))
		error = -EBUSY;
	else
		sk->sk_state |= PPPOX_BOUND;

out:
	release_sock(sk);
	return error;
}

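/* connect(): validate the peer address, resolve a route to it,
 * register the PPP channel and mark the socket connected.
 */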
static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct rtable *rt;
	struct flowi4 fl4;
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
		return -EALREADY;

	lock_sock(sk);
	/* Check for already bound sockets */
	if (sk->sk_state & PPPOX_CONNECTED) {
		error = -EBUSY;
		goto end;
	}

	/* Check for already disconnected sockets, on attempts to disconnect */
	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto end;
	}

	if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
		error = -EINVAL;
		goto end;
	}

	po->chan.private = sk;
	po->chan.ops = &pptp_chan_ops;

	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0,
				   IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
	if (IS_ERR(rt)) {
		error = -EHOSTUNREACH;
		goto end;
	}
	sk_setup_caps(sk, &rt->dst);

	po->chan.mtu = dst_mtu(&rt->dst);
	if (!po->chan.mtu)
		po->chan.mtu = PPP_MRU;
	ip_rt_put(rt);
	po->chan.mtu -= PPTP_HEADER_OVERHEAD;

	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
	error = ppp_register_channel(&po->chan);
	if (error) {
		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
		goto end;
	}

	opt->dst_addr = sp->sa_addr.pptp;
	sk->sk_state |= PPPOX_CONNECTED;

end:
	release_sock(sk);
	return error;
}

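/* getname(): report the local PPTP address (our call ID). */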
static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
	int *usockaddr_len, int peer)
{
	int len = sizeof(struct sockaddr_pppox);
	struct sockaddr_pppox sp;

	memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));

	sp.sa_family    = AF_PPPOX;
	sp.sa_protocol  = PX_PROTO_PPTP;
	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;

	memcpy(uaddr, &sp, len);

	*usockaddr_len = len;

	return 0;
}

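/* release(): unhook the channel, wait for RCU readers to finish and
 * drop the socket.
 */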
static int pptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct pppox_sock *po;
	int error = 0;

	if (!sk)
		return 0;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_DEAD)) {
		release_sock(sk);
		return -EBADF;
	}

	po = pppox_sk(sk);
	del_chan(po);
	synchronize_rcu();

	pppox_unbind_sock(sk);
	sk->sk_state = PPPOX_DEAD;

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return error;
}

static void pptp_sock_destruct(struct sock *sk)
{
	if (!(sk->sk_state & PPPOX_DEAD)) {
		del_chan(pppox_sk(sk));
		pppox_unbind_sock(sk);
	}
	skb_queue_purge(&sk->sk_receive_queue);
}

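/* create(): allocate the PPPoX socket and initialize the sequence
 * and acknowledgement counters.
 */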
static int pptp_create(struct net *net, struct socket *sock, int kern)
{
	int error = -ENOMEM;
	struct sock *sk;
	struct pppox_sock *po;
	struct pptp_opt *opt;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sock->state = SS_UNCONNECTED;
	sock->ops   = &pptp_ops;

	sk->sk_backlog_rcv = pptp_rcv_core;
	sk->sk_state       = PPPOX_NONE;
	sk->sk_type        = SOCK_STREAM;
	sk->sk_family      = PF_PPPOX;
	sk->sk_protocol    = PX_PROTO_PPTP;
	sk->sk_destruct    = pptp_sock_destruct;

	po = pppox_sk(sk);
	opt = &po->proto.pptp;

	opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
	opt->ack_recv = 0; opt->ack_sent = 0xffffffff;

	error = 0;
out:
	return error;
}

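/* Channel ioctl: only PPPIOCGFLAGS and PPPIOCSFLAGS are handled
 * here; anything else returns -ENOTTY.
 */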
static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
	unsigned long arg)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = opt->ppp_flags;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		opt->ppp_flags = val & ~SC_RCV_BITS;
		err = 0;
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct ppp_channel_ops pptp_chan_ops = {
	.start_xmit = pptp_xmit,
	.ioctl      = pptp_ppp_ioctl,
};

static struct proto pptp_sk_proto __read_mostly = {
	.name     = "PPTP",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

static const struct proto_ops pptp_ops = {
	.family     = AF_PPPOX,
	.owner      = THIS_MODULE,
	.release    = pptp_release,
	.bind       = pptp_bind,
	.connect    = pptp_connect,
	.socketpair = sock_no_socketpair,
	.accept     = sock_no_accept,
	.getname    = pptp_getname,
	.poll       = sock_no_poll,
	.listen     = sock_no_listen,
	.shutdown   = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg    = sock_no_sendmsg,
	.recvmsg    = sock_no_recvmsg,
	.mmap       = sock_no_mmap,
	.ioctl      = pppox_ioctl,
};

static const struct pppox_proto pppox_pptp_proto = {
	.create = pptp_create,
	.owner  = THIS_MODULE,
};

static const struct gre_protocol gre_pptp_protocol = {
	.handler = pptp_rcv,
};

static int __init pptp_init_module(void)
{
	int err = 0;
	pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");

	callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
	if (!callid_sock)
		return -ENOMEM;

	err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	if (err) {
		pr_err("PPTP: can't add gre protocol\n");
		goto out_mem_free;
	}

	err = proto_register(&pptp_sk_proto, 0);
	if (err) {
		pr_err("PPTP: can't register sk_proto\n");
		goto out_gre_del_protocol;
	}

	err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
	if (err) {
		pr_err("PPTP: can't register pppox_proto\n");
		goto out_unregister_sk_proto;
	}

	return 0;

out_unregister_sk_proto:
	proto_unregister(&pptp_sk_proto);
out_gre_del_protocol:
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
out_mem_free:
	vfree(callid_sock);

	return err;
}

static void __exit pptp_exit_module(void)
{
	unregister_pppox_proto(PX_PROTO_PPTP);
	proto_unregister(&pptp_sk_proto);
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	vfree(callid_sock);
}

module_init(pptp_init_module);
module_exit(pptp_exit_module);

MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);