/* net/atm/pppoatm.c - RFC2364 PPP over ATM/AAL5 */

/* Copyright 1999-2000 by Mitchell Blank Jr */
/* Based on clip.c; 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
/* And on ppp_async.c; Copyright 1999 Paul Mackerras */
/* And help from Jens Axboe */

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames in ATM AAL5 PDUs.
 */

/*
 * One shortcoming of this driver is that it does not comply with
 * section 8 of RFC2364 - we are supposed to detect a change in
 * encapsulation and immediately abort the connection, in order to
 * avoid a black hole being created if our peer loses state and
 * changes encapsulation unilaterally. However, since the ppp_generic
 * layer actually does the decapsulation, we need a way of notifying
 * it when we _think_ there might be a problem. There are two cases:
 *  1. LLC encapsulation was missing when it was enabled. In this
 *     case, we should tell the upper layer "tear down this session
 *     if this skb looks ok to you".
 *  2. LLC encapsulation was present when it was disabled. Then we
 *     need to tell the upper layer "this packet may be ok, but if
 *     it's in error tear down the session".
 * These hooks are not yet available in ppp_generic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/atmppp.h>

#include "common.h"

enum pppoatm_encaps {
        e_autodetect = PPPOATM_ENCAPS_AUTODETECT,
        e_vc = PPPOATM_ENCAPS_VC,
        e_llc = PPPOATM_ENCAPS_LLC,
};

struct pppoatm_vcc {
        struct atm_vcc *atmvcc;         /* VCC descriptor */
        void (*old_push)(struct atm_vcc *, struct sk_buff *);
        void (*old_pop)(struct atm_vcc *, struct sk_buff *);
        void (*old_release_cb)(struct atm_vcc *);
        struct module *old_owner;
                        /* keep old push/pop for detaching */
        enum pppoatm_encaps encaps;
        atomic_t inflight;
        unsigned long blocked;
        int flags;                      /* SC_COMP_PROT - compress protocol */
        struct ppp_channel chan;        /* interface to generic ppp layer */
        struct tasklet_struct wakeup_tasklet;
};


/*
 * We want to allow two packets in the queue. The one that's currently in
 * flight, and *one* queued up ready for the ATM device to send immediately
 * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
 * inflight == -2 represents an empty queue, -1 one packet, and zero means
 * there are two packets in the queue.
 */
#define NONE_INFLIGHT -2

#define BLOCKED 0
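/*
 * BLOCKED is the bit number in pvcc->blocked: it is set when pppoatm_send()
 * has had to refuse a packet, so that pppoatm_pop() / pppoatm_release_cb()
 * know a deferred ppp_output_wakeup() is owed via the wakeup tasklet.
 */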

/*
 * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
 * ID (0xC021) used in autodetection
 */
static const unsigned char pppllc[6] = { 0xFE, 0xFE, 0x03, 0xCF, 0xC0, 0x21 };
#define LLC_LEN (4)
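/*
 * For reference: in pppllc[] above, 0xFE 0xFE is the LLC DSAP/SSAP pair for
 * routed ISO PDUs, 0x03 is the UI control field and 0xCF is the NLPID
 * assigned to PPP (RFC 2364); LLC_LEN covers those first four bytes. The
 * trailing 0xC0 0x21 is the PPP protocol number for LCP, which is the first
 * thing we expect to see from the peer while the link is being established,
 * hence its use in autodetection.
 */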

static inline struct pppoatm_vcc *atmvcc_to_pvcc(const struct atm_vcc *atmvcc)
{
        return (struct pppoatm_vcc *) (atmvcc->user_back);
}

static inline struct pppoatm_vcc *chan_to_pvcc(const struct ppp_channel *chan)
{
        return (struct pppoatm_vcc *) (chan->private);
}

/*
 * We can't do this directly from our _pop handler, since the ppp code
 * doesn't want to be called in interrupt context, so we do it from
 * a tasklet
 */
static void pppoatm_wakeup_sender(unsigned long arg)
{
        ppp_output_wakeup((struct ppp_channel *) arg);
}

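/*
 * Our ->release_cb hook. The ATM socket code calls this once the socket lock
 * is released by its owner, which is when we can finally issue the wakeup we
 * had to defer in the sock_owned_by_user() path of pppoatm_send().
 */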
static void pppoatm_release_cb(struct atm_vcc *atmvcc)
{
        struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);

        /*
         * As in pppoatm_pop(), it's safe to clear the BLOCKED bit here because
         * the wakeup *can't* race with pppoatm_send(). They both hold the PPP
         * channel's ->downl lock. And the potential race with *setting* it,
         * which leads to the double-check dance in pppoatm_may_send(), doesn't
         * exist here. In the sock_owned_by_user() case in pppoatm_send(), we
         * set the BLOCKED bit while the socket is still locked. We know that
         * ->release_cb() can't be called until that's done.
         */
        if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
                tasklet_schedule(&pvcc->wakeup_tasklet);
        if (pvcc->old_release_cb)
                pvcc->old_release_cb(atmvcc);
}
/*
 * This gets called every time the ATM card has finished sending our
 * skb. The ->old_pop will take care of the normal ATM flow control,
 * but we also need to wake up the PPP channel if we blocked it.
 */
static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
        struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);

        pvcc->old_pop(atmvcc, skb);
        atomic_dec(&pvcc->inflight);

        /*
         * We always used to run the wakeup tasklet unconditionally here, for
         * fear of race conditions where we clear the BLOCKED flag just as we
         * refuse another packet in pppoatm_send(). This was quite inefficient.
         *
         * In fact it's OK. The PPP core will only ever call pppoatm_send()
         * while holding the channel->downl lock. And ppp_output_wakeup() as
         * called by the tasklet will *also* grab that lock. So even if another
         * CPU is in pppoatm_send() right now, the tasklet isn't going to race
         * with it. The wakeup *will* happen after the other CPU is safely out
         * of pppoatm_send() again.
         *
         * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
         * is about to return, that's fine. We trigger a wakeup which will
         * happen later. And if the CPU in pppoatm_send() *hasn't* set the
         * BLOCKED bit yet, that's fine too because of the double check in
         * pppoatm_may_send() which is commented there.
         */
        if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
                tasklet_schedule(&pvcc->wakeup_tasklet);
}

/*
 * Unbind from PPP - currently we only do this when closing the socket,
 * but we could put this into an ioctl if need be
 */
static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
{
        struct pppoatm_vcc *pvcc;
        pvcc = atmvcc_to_pvcc(atmvcc);
        atmvcc->push = pvcc->old_push;
        atmvcc->pop = pvcc->old_pop;
        atmvcc->release_cb = pvcc->old_release_cb;
        tasklet_kill(&pvcc->wakeup_tasklet);
        ppp_unregister_channel(&pvcc->chan);
        atmvcc->user_back = NULL;
        kfree(pvcc);
}

/* Called when an AAL5 PDU comes in */
static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
        struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
        pr_debug("\n");
        if (skb == NULL) {              /* VCC was closed */
                struct module *module;

                pr_debug("removing ATMPPP VCC %p\n", pvcc);
                module = pvcc->old_owner;
                pppoatm_unassign_vcc(atmvcc);
                atmvcc->push(atmvcc, NULL);     /* Pass along bad news */
                module_put(module);
                return;
        }
        atm_return(atmvcc, skb->truesize);
        switch (pvcc->encaps) {
        case e_llc:
                if (skb->len < LLC_LEN ||
                    memcmp(skb->data, pppllc, LLC_LEN))
                        goto error;
                skb_pull(skb, LLC_LEN);
                break;
        case e_autodetect:
                if (pvcc->chan.ppp == NULL) {   /* Not bound yet! */
                        kfree_skb(skb);
                        return;
                }
                if (skb->len >= sizeof(pppllc) &&
                    !memcmp(skb->data, pppllc, sizeof(pppllc))) {
                        pvcc->encaps = e_llc;
                        skb_pull(skb, LLC_LEN);
                        break;
                }
                if (skb->len >= (sizeof(pppllc) - LLC_LEN) &&
                    !memcmp(skb->data, &pppllc[LLC_LEN],
                    sizeof(pppllc) - LLC_LEN)) {
                        pvcc->encaps = e_vc;
                        pvcc->chan.mtu += LLC_LEN;
                        break;
                }
                pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
                         skb->data[0], skb->data[1], skb->data[2],
                         skb->data[3], skb->data[4], skb->data[5]);
                goto error;
        case e_vc:
                break;
        }
        ppp_input(&pvcc->chan, skb);
        return;

error:
        kfree_skb(skb);
        ppp_input_error(&pvcc->chan, 0);
}

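/*
 * Decide whether we are allowed to hand one more packet to the ATM driver.
 * On success this returns 1 and has already accounted the packet in
 * pvcc->inflight (undone later by pppoatm_pop()); on failure it returns 0
 * with the BLOCKED bit set, so the next pop triggers a wakeup.
 */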
static int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
{
        /*
         * It's not clear that we need to bother with using atm_may_send()
         * to check we don't exceed sk->sk_sndbuf. If userspace sets a
         * value of sk_sndbuf which is lower than the MTU, we're going to
         * block for ever. But the code always did that before we introduced
         * the packet count limit, so...
         */
        if (atm_may_send(pvcc->atmvcc, size) &&
            atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
                return 1;

        /*
         * We use test_and_set_bit() rather than set_bit() here because
         * we need to ensure there's a memory barrier after it. The bit
         * *must* be set before we do the atomic_inc() on pvcc->inflight.
         * There's no smp_mb__after_set_bit(), so it's this or abuse
         * smp_mb__after_atomic().
         */
        test_and_set_bit(BLOCKED, &pvcc->blocked);

        /*
         * We may have raced with pppoatm_pop(). If it ran for the
         * last packet in the queue, *just* before we set the BLOCKED
         * bit, then it might never run again and the channel could
         * remain permanently blocked. Cope with that race by checking
         * *again*. If it did run in that window, we'll have space on
         * the queue now and can return success. It's harmless to leave
         * the BLOCKED flag set, since it's only used as a trigger to
         * run the wakeup tasklet. Another wakeup will never hurt.
         * If pppoatm_pop() is running but hasn't got as far as making
         * space on the queue yet, then it hasn't checked the BLOCKED
         * flag yet either, so we're safe in that case too. It'll issue
         * an "immediate" wakeup... where "immediate" actually involves
         * taking the PPP channel's ->downl lock, which is held by the
         * code path that calls pppoatm_send(), and is thus going to
         * wait for us to finish.
         */
        if (atm_may_send(pvcc->atmvcc, size) &&
            atomic_inc_not_zero(&pvcc->inflight))
                return 1;

        return 0;
}

/*
 * Called by ppp_generic.c to send a packet - returns true if the packet
 * was accepted. If we return false, then it's our job to call
 * ppp_output_wakeup(chan) when we're feeling more up to it.
 * Note that in the ENOMEM case (as opposed to the !atm_may_send case)
 * we should really drop the packet, but the generic layer doesn't
 * support this yet. We just return 'DROP_PACKET', which we actually define
 * as success, just to be clear about what we're really doing.
 */
#define DROP_PACKET 1
static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
        struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
        struct atm_vcc *vcc;
        int ret;

        ATM_SKB(skb)->vcc = pvcc->atmvcc;
        pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
        if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
                (void) skb_pull(skb, 1);

        vcc = ATM_SKB(skb)->vcc;
        bh_lock_sock(sk_atm(vcc));
        if (sock_owned_by_user(sk_atm(vcc))) {
                /*
                 * Needs to happen (and be flushed, hence test_and_) before we
                 * unlock the socket. It needs to be seen by the time our
                 * ->release_cb gets called.
                 */
                test_and_set_bit(BLOCKED, &pvcc->blocked);
                goto nospace;
        }
        if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
            test_bit(ATM_VF_CLOSE, &vcc->flags) ||
            !test_bit(ATM_VF_READY, &vcc->flags)) {
                bh_unlock_sock(sk_atm(vcc));
                kfree_skb(skb);
                return DROP_PACKET;
        }

        switch (pvcc->encaps) {         /* LLC encapsulation needed */
        case e_llc:
                if (skb_headroom(skb) < LLC_LEN) {
                        struct sk_buff *n;
                        n = skb_realloc_headroom(skb, LLC_LEN);
                        if (n != NULL &&
                            !pppoatm_may_send(pvcc, n->truesize)) {
                                kfree_skb(n);
                                goto nospace;
                        }
                        consume_skb(skb);
                        skb = n;
                        if (skb == NULL) {
                                bh_unlock_sock(sk_atm(vcc));
                                return DROP_PACKET;
                        }
                } else if (!pppoatm_may_send(pvcc, skb->truesize))
                        goto nospace;
                memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
                break;
        case e_vc:
                if (!pppoatm_may_send(pvcc, skb->truesize))
                        goto nospace;
                break;
        case e_autodetect:
                bh_unlock_sock(sk_atm(vcc));
                pr_debug("Trying to send without setting encaps!\n");
                kfree_skb(skb);
                return 1;
        }

        atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
        ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
                 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
        ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
            ? DROP_PACKET : 1;
        bh_unlock_sock(sk_atm(vcc));
        return ret;

nospace:
        bh_unlock_sock(sk_atm(vcc));
        /*
         * We don't have space to send this SKB now, but we might have
         * already applied SC_COMP_PROT compression, so we may need to undo it.
         */
        if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
            skb->data[-1] == '\0')
                (void) skb_push(skb, 1);
        return 0;
}

/* This handles ioctls sent to the /dev/ppp interface */
static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
        unsigned long arg)
{
        switch (cmd) {
        case PPPIOCGFLAGS:
                return put_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
                    ? -EFAULT : 0;
        case PPPIOCSFLAGS:
                return get_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
                    ? -EFAULT : 0;
        }
        return -ENOTTY;
}

static const struct ppp_channel_ops pppoatm_ops = {
        .start_xmit = pppoatm_send,
        .ioctl = pppoatm_devppp_ioctl,
};

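/*
 * Attach the PPP backend to this VCC. Reached from pppoatm_ioctl() when
 * userspace issues ATM_SETBACKEND with ATM_BACKEND_PPP; saves the old
 * push/pop/release_cb handlers and owner so pppoatm_unassign_vcc() can
 * restore them later, then registers a channel with the generic PPP layer.
 */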
static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
{
        struct atm_backend_ppp be;
        struct pppoatm_vcc *pvcc;
        int err;
        /*
         * Each PPPoATM instance has its own tasklet - this is just a
         * prototypical one used to initialize them
         */
        static const DECLARE_TASKLET(tasklet_proto, pppoatm_wakeup_sender, 0);
        if (copy_from_user(&be, arg, sizeof be))
                return -EFAULT;
        if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
            be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
                return -EINVAL;
        pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
        if (pvcc == NULL)
                return -ENOMEM;
        pvcc->atmvcc = atmvcc;

        /* Maximum is zero, so that we can use atomic_inc_not_zero() */
        atomic_set(&pvcc->inflight, NONE_INFLIGHT);
        pvcc->old_push = atmvcc->push;
        pvcc->old_pop = atmvcc->pop;
        pvcc->old_owner = atmvcc->owner;
        pvcc->old_release_cb = atmvcc->release_cb;
        pvcc->encaps = (enum pppoatm_encaps) be.encaps;
        pvcc->chan.private = pvcc;
        pvcc->chan.ops = &pppoatm_ops;
        pvcc->chan.mtu = atmvcc->qos.txtp.max_sdu - PPP_HDRLEN -
            (be.encaps == e_vc ? 0 : LLC_LEN);
        pvcc->wakeup_tasklet = tasklet_proto;
        pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
        err = ppp_register_channel(&pvcc->chan);
        if (err != 0) {
                kfree(pvcc);
                return err;
        }
        atmvcc->user_back = pvcc;
        atmvcc->push = pppoatm_push;
        atmvcc->pop = pppoatm_pop;
        atmvcc->release_cb = pppoatm_release_cb;
        __module_get(THIS_MODULE);
        atmvcc->owner = THIS_MODULE;

        /* re-process everything received between connection setup and
           backend setup */
        vcc_process_recv_queue(atmvcc);
        return 0;
}

/*
 * This handles ioctls actually performed on our vcc - we must return
 * -ENOIOCTLCMD for any unrecognized ioctl
 */
static int pppoatm_ioctl(struct socket *sock, unsigned int cmd,
        unsigned long arg)
{
        struct atm_vcc *atmvcc = ATM_SD(sock);
        void __user *argp = (void __user *)arg;

        if (cmd != ATM_SETBACKEND && atmvcc->push != pppoatm_push)
                return -ENOIOCTLCMD;
        switch (cmd) {
        case ATM_SETBACKEND: {
                atm_backend_t b;
                if (get_user(b, (atm_backend_t __user *) argp))
                        return -EFAULT;
                if (b != ATM_BACKEND_PPP)
                        return -ENOIOCTLCMD;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (sock->state != SS_CONNECTED)
                        return -EINVAL;
                return pppoatm_assign_vcc(atmvcc, argp);
        }
        case PPPIOCGCHAN:
                return put_user(ppp_channel_index(&atmvcc_to_pvcc(atmvcc)->
                    chan), (int __user *) argp) ? -EFAULT : 0;
        case PPPIOCGUNIT:
                return put_user(ppp_unit_number(&atmvcc_to_pvcc(atmvcc)->
                    chan), (int __user *) argp) ? -EFAULT : 0;
        }
        return -ENOIOCTLCMD;
}
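
/*
 * For context, a rough sketch of how userspace is expected to drive this
 * backend. This is illustrative only and not part of the module: error
 * handling and the PVC/QoS setup are the caller's job (pppd's pppoatm
 * plugin is the canonical user), and the ioctl must be issued on an
 * already-connected PVC socket by a CAP_NET_ADMIN process:
 *
 *      int fd = socket(PF_ATMPVC, SOCK_DGRAM, ATM_AAL5);
 *      // ... set QoS via setsockopt(SOL_ATM, SO_ATMQOS) and connect()
 *      //     the PVC with a struct sockaddr_atmpvc ...
 *
 *      struct atm_backend_ppp be = {
 *              .backend_num = ATM_BACKEND_PPP,
 *              .encaps = PPPOATM_ENCAPS_LLC,   // or _VC / _AUTODETECT
 *      };
 *      ioctl(fd, ATM_SETBACKEND, &be);         // lands in pppoatm_assign_vcc()
 *
 *      int chindex;
 *      ioctl(fd, PPPIOCGCHAN, &chindex);       // channel index, to be attached
 *                                              // to a PPP unit via /dev/ppp
 */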

static struct atm_ioctl pppoatm_ioctl_ops = {
        .owner = THIS_MODULE,
        .ioctl = pppoatm_ioctl,
};

static int __init pppoatm_init(void)
{
        register_atm_ioctl(&pppoatm_ioctl_ops);
        return 0;
}

static void __exit pppoatm_exit(void)
{
        deregister_atm_ioctl(&pppoatm_ioctl_ops);
}

module_init(pppoatm_init);
module_exit(pppoatm_exit);

MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
MODULE_DESCRIPTION("RFC2364 PPP over ATM/AAL5");
MODULE_LICENSE("GPL");