1 /* net/atm/pppoatm.c - RFC2364 PPP over ATM/AAL5 */
2
3 /* Copyright 1999-2000 by Mitchell Blank Jr */
4 /* Based on clip.c; 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
5 /* And on ppp_async.c; Copyright 1999 Paul Mackerras */
6 /* And help from Jens Axboe */
7
8 /*
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * This driver provides the encapsulation and framing for sending
15 * and receiving PPP frames in ATM AAL5 PDUs.
16 */
17
18 /*
19 * One shortcoming of this driver is that it does not comply with
20 * section 8 of RFC2364 - we are supposed to detect a change
21 * in encapsulation and immediately abort the connection (in order
22 * to avoid a black-hole being created if our peer loses state
23  * and changes encapsulation unilaterally). However, since the
24  * ppp_generic layer actually does the decapsulation, we need
25  * a way of notifying it when we _think_ there might be a problem.
26  * There are two cases:
27 * 1. LLC-encapsulation was missing when it was enabled. In
28 * this case, we should tell the upper layer "tear down
29 * this session if this skb looks ok to you"
30 * 2. LLC-encapsulation was present when it was disabled. Then
31 * we need to tell the upper layer "this packet may be
32  * ok, but if it's in error tear down the session"
33 * These hooks are not yet available in ppp_generic
34 */
35
36 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
37
38 #include <linux/module.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/skbuff.h>
42 #include <linux/slab.h>
43 #include <linux/atm.h>
44 #include <linux/atmdev.h>
45 #include <linux/capability.h>
46 #include <linux/ppp_defs.h>
47 #include <linux/ppp-ioctl.h>
48 #include <linux/ppp_channel.h>
49 #include <linux/atmppp.h>
50
51 #include "common.h"
52
53 enum pppoatm_encaps {
54 e_autodetect = PPPOATM_ENCAPS_AUTODETECT,
55 e_vc = PPPOATM_ENCAPS_VC,
56 e_llc = PPPOATM_ENCAPS_LLC,
57 };
58
59 struct pppoatm_vcc {
60 struct atm_vcc *atmvcc; /* VCC descriptor */
61 void (*old_push)(struct atm_vcc *, struct sk_buff *);
62 void (*old_pop)(struct atm_vcc *, struct sk_buff *);
63 struct module *old_owner;
64 /* keep old push/pop for detaching */
65 enum pppoatm_encaps encaps;
66 atomic_t inflight;
67 unsigned long blocked;
68 int flags; /* SC_COMP_PROT - compress protocol */
69 struct ppp_channel chan; /* interface to generic ppp layer */
70 struct tasklet_struct wakeup_tasklet;
71 };
72
73 /*
74 * We want to allow two packets in the queue. The one that's currently in
75 * flight, and *one* queued up ready for the ATM device to send immediately
76 * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
77 * inflight == -2 represents an empty queue, -1 one packet, and zero means
78 * there are two packets in the queue.
79 */
80 #define NONE_INFLIGHT -2
81
82 #define BLOCKED 0
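/*
 * Example walk-through: inflight starts at NONE_INFLIGHT (-2, empty
 * queue). Each packet accepted by pppoatm_may_send() does an
 * atomic_inc_not_zero(), so the counter moves -2 -> -1 -> 0; once it
 * hits zero further increments fail and the channel reports itself as
 * blocked. Each TX-done pop in pppoatm_pop() does an atomic_dec(),
 * moving the counter back towards -2.
 */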
83
84 /*
85 * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
86 * ID (0xC021) used in autodetection
87 */
88 static const unsigned char pppllc[6] = { 0xFE, 0xFE, 0x03, 0xCF, 0xC0, 0x21 };
89 #define LLC_LEN (4)
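/*
 * Byte-by-byte, per RFC 2364: 0xFE 0xFE 0x03 is the LLC header (DSAP,
 * SSAP and UI control field), 0xCF is the NLPID for PPP, and 0xC0 0x21
 * is the PPP protocol number for LCP. Only the first LLC_LEN bytes are
 * the LLC header that gets prepended or stripped; the trailing LCP
 * protocol ID is matched only during autodetection.
 */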
90
91 static inline struct pppoatm_vcc *atmvcc_to_pvcc(const struct atm_vcc *atmvcc)
92 {
93 return (struct pppoatm_vcc *) (atmvcc->user_back);
94 }
95
96 static inline struct pppoatm_vcc *chan_to_pvcc(const struct ppp_channel *chan)
97 {
98 return (struct pppoatm_vcc *) (chan->private);
99 }
100
101 /*
102 * We can't do this directly from our _pop handler, since the ppp code
103 * doesn't want to be called in interrupt context, so we do it from
104 * a tasklet
105 */
106 static void pppoatm_wakeup_sender(unsigned long arg)
107 {
108 ppp_output_wakeup((struct ppp_channel *) arg);
109 }
110
111 /*
112 * This gets called every time the ATM card has finished sending our
113  * skb. The ->old_pop will take care of normal atm flow control,
114 * but we also need to wake up the device if we blocked it
115 */
116 static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
117 {
118 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
119
120 pvcc->old_pop(atmvcc, skb);
121 atomic_dec(&pvcc->inflight);
122
123 /*
124 * We always used to run the wakeup tasklet unconditionally here, for
125 * fear of race conditions where we clear the BLOCKED flag just as we
126 * refuse another packet in pppoatm_send(). This was quite inefficient.
127 *
128 * In fact it's OK. The PPP core will only ever call pppoatm_send()
129 * while holding the channel->downl lock. And ppp_output_wakeup() as
130 * called by the tasklet will *also* grab that lock. So even if another
131 * CPU is in pppoatm_send() right now, the tasklet isn't going to race
132 * with it. The wakeup *will* happen after the other CPU is safely out
133 * of pppoatm_send() again.
134 *
135 * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
136  * is about to return, that's fine. We trigger a wakeup which will
137 * happen later. And if the CPU in pppoatm_send() *hasn't* set the
138 * BLOCKED bit yet, that's fine too because of the double check in
139 * pppoatm_may_send() which is commented there.
140 */
141 if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
142 tasklet_schedule(&pvcc->wakeup_tasklet);
143 }
144
145 /*
146 * Unbind from PPP - currently we only do this when closing the socket,
147 * but we could put this into an ioctl if need be
148 */
149 static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
150 {
151 struct pppoatm_vcc *pvcc;
152 pvcc = atmvcc_to_pvcc(atmvcc);
153 atmvcc->push = pvcc->old_push;
154 atmvcc->pop = pvcc->old_pop;
155 tasklet_kill(&pvcc->wakeup_tasklet);
156 ppp_unregister_channel(&pvcc->chan);
157 atmvcc->user_back = NULL;
158 kfree(pvcc);
159 }
160
161 /* Called when an AAL5 PDU comes in */
162 static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
163 {
164 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
165 pr_debug("\n");
166 if (skb == NULL) { /* VCC was closed */
167 struct module *module;
168
169 pr_debug("removing ATMPPP VCC %p\n", pvcc);
170 module = pvcc->old_owner;
171 pppoatm_unassign_vcc(atmvcc);
172 atmvcc->push(atmvcc, NULL); /* Pass along bad news */
173 module_put(module);
174 return;
175 }
176 atm_return(atmvcc, skb->truesize);
177 switch (pvcc->encaps) {
178 case e_llc:
179 if (skb->len < LLC_LEN ||
180 memcmp(skb->data, pppllc, LLC_LEN))
181 goto error;
182 skb_pull(skb, LLC_LEN);
183 break;
184 case e_autodetect:
185 if (pvcc->chan.ppp == NULL) { /* Not bound yet! */
186 kfree_skb(skb);
187 return;
188 }
189 if (skb->len >= sizeof(pppllc) &&
190 !memcmp(skb->data, pppllc, sizeof(pppllc))) {
191 pvcc->encaps = e_llc;
192 skb_pull(skb, LLC_LEN);
193 break;
194 }
195 if (skb->len >= (sizeof(pppllc) - LLC_LEN) &&
196 !memcmp(skb->data, &pppllc[LLC_LEN],
197 sizeof(pppllc) - LLC_LEN)) {
198 pvcc->encaps = e_vc;
199 pvcc->chan.mtu += LLC_LEN;
200 break;
201 }
202 pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
203 skb->data[0], skb->data[1], skb->data[2],
204 skb->data[3], skb->data[4], skb->data[5]);
205 goto error;
206 case e_vc:
207 break;
208 }
209 ppp_input(&pvcc->chan, skb);
210 return;
211
212 error:
213 kfree_skb(skb);
214 ppp_input_error(&pvcc->chan, 0);
215 }
216
217 static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
218 {
219 /*
220 * It's not clear that we need to bother with using atm_may_send()
221 * to check we don't exceed sk->sk_sndbuf. If userspace sets a
222 * value of sk_sndbuf which is lower than the MTU, we're going to
223  * block forever. But the code always did that before we introduced
224 * the packet count limit, so...
225 */
226 if (atm_may_send(pvcc->atmvcc, size) &&
227 atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
228 return 1;
229
230 /*
231 * We use test_and_set_bit() rather than set_bit() here because
232 * we need to ensure there's a memory barrier after it. The bit
233 * *must* be set before we do the atomic_inc() on pvcc->inflight.
234 * There's no smp_mb__after_set_bit(), so it's this or abuse
235 * smp_mb__after_clear_bit().
236 */
237 test_and_set_bit(BLOCKED, &pvcc->blocked);
238
239 /*
240 * We may have raced with pppoatm_pop(). If it ran for the
241 * last packet in the queue, *just* before we set the BLOCKED
242 * bit, then it might never run again and the channel could
243 * remain permanently blocked. Cope with that race by checking
244 * *again*. If it did run in that window, we'll have space on
245 * the queue now and can return success. It's harmless to leave
246 * the BLOCKED flag set, since it's only used as a trigger to
247 * run the wakeup tasklet. Another wakeup will never hurt.
248 * If pppoatm_pop() is running but hasn't got as far as making
249 * space on the queue yet, then it hasn't checked the BLOCKED
250 * flag yet either, so we're safe in that case too. It'll issue
251 * an "immediate" wakeup... where "immediate" actually involves
252 * taking the PPP channel's ->downl lock, which is held by the
253 * code path that calls pppoatm_send(), and is thus going to
254 * wait for us to finish.
255 */
256 if (atm_may_send(pvcc->atmvcc, size) &&
257 atomic_inc_not_zero(&pvcc->inflight))
258 return 1;
259
260 return 0;
261 }
262 /*
263  * Called by ppp_generic.c to send a packet - returns true if the packet
264 * was accepted. If we return false, then it's our job to call
265 * ppp_output_wakeup(chan) when we're feeling more up to it.
266 * Note that in the ENOMEM case (as opposed to the !atm_may_send case)
267 * we should really drop the packet, but the generic layer doesn't
268 * support this yet. We just return 'DROP_PACKET' which we actually define
269 * as success, just to be clear what we're really doing.
270 */
271 #define DROP_PACKET 1
272 static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
273 {
274 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
275 struct atm_vcc *vcc;
276 int ret;
277
278 ATM_SKB(skb)->vcc = pvcc->atmvcc;
279 pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
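/*
 * PPP protocol-field compression: ppp_generic hands us a frame that
 * starts with the full two-byte protocol field, so if SC_COMP_PROT is
 * set and the high byte is zero we drop that byte here (and push it
 * back in the nospace path below if the packet has to be retried).
 */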
280 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
281 (void) skb_pull(skb, 1);
282
283 vcc = ATM_SKB(skb)->vcc;
284 bh_lock_sock(sk_atm(vcc));
285 if (sock_owned_by_user(sk_atm(vcc)))
286 goto nospace;
287 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
288 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
289 !test_bit(ATM_VF_READY, &vcc->flags)) {
290 bh_unlock_sock(sk_atm(vcc));
291 kfree_skb(skb);
292 return DROP_PACKET;
293 }
294
295 switch (pvcc->encaps) { /* LLC encapsulation needed */
296 case e_llc:
297 if (skb_headroom(skb) < LLC_LEN) {
298 struct sk_buff *n;
299 n = skb_realloc_headroom(skb, LLC_LEN);
300 if (n != NULL &&
301 !pppoatm_may_send(pvcc, n->truesize)) {
302 kfree_skb(n);
303 goto nospace;
304 }
305 consume_skb(skb);
306 skb = n;
307 if (skb == NULL) {
308 bh_unlock_sock(sk_atm(vcc));
309 return DROP_PACKET;
310 }
311 } else if (!pppoatm_may_send(pvcc, skb->truesize))
312 goto nospace;
313 memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
314 break;
315 case e_vc:
316 if (!pppoatm_may_send(pvcc, skb->truesize))
317 goto nospace;
318 break;
319 case e_autodetect:
320 bh_unlock_sock(sk_atm(vcc));
321 pr_debug("Trying to send without setting encaps!\n");
322 kfree_skb(skb);
323 return 1;
324 }
325
326 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
327 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
328 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
329 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
330 ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
331 ? DROP_PACKET : 1;
332 bh_unlock_sock(sk_atm(vcc));
333 return ret;
334 nospace:
335 bh_unlock_sock(sk_atm(vcc));
336 /*
337 * We don't have space to send this SKB now, but we might have
338  * already applied SC_COMP_PROT compression, so we may need to undo it
339 */
340 if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
341 skb->data[-1] == '\0')
342 (void) skb_push(skb, 1);
343 return 0;
344 }
345
346 /* This handles ioctls sent to the /dev/ppp interface */
347 static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
348 unsigned long arg)
349 {
350 switch (cmd) {
351 case PPPIOCGFLAGS:
352 return put_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
353 ? -EFAULT : 0;
354 case PPPIOCSFLAGS:
355 return get_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
356 ? -EFAULT : 0;
357 }
358 return -ENOTTY;
359 }
360
361 static const struct ppp_channel_ops pppoatm_ops = {
362 .start_xmit = pppoatm_send,
363 .ioctl = pppoatm_devppp_ioctl,
364 };
365
366 static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
367 {
368 struct atm_backend_ppp be;
369 struct pppoatm_vcc *pvcc;
370 int err;
371 /*
372 * Each PPPoATM instance has its own tasklet - this is just a
373 * prototypical one used to initialize them
374 */
375 static const DECLARE_TASKLET(tasklet_proto, pppoatm_wakeup_sender, 0);
376 if (copy_from_user(&be, arg, sizeof be))
377 return -EFAULT;
378 if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
379 be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
380 return -EINVAL;
381 pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
382 if (pvcc == NULL)
383 return -ENOMEM;
384 pvcc->atmvcc = atmvcc;
385
386 /* Maximum is zero, so that we can use atomic_inc_not_zero() */
387 atomic_set(&pvcc->inflight, NONE_INFLIGHT);
388 pvcc->old_push = atmvcc->push;
389 pvcc->old_pop = atmvcc->pop;
390 pvcc->old_owner = atmvcc->owner;
391 pvcc->encaps = (enum pppoatm_encaps) be.encaps;
392 pvcc->chan.private = pvcc;
393 pvcc->chan.ops = &pppoatm_ops;
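/*
 * The channel MTU is the AAL5 max SDU minus the PPP header and, unless
 * VC-multiplexing was requested, minus the LLC header that will be
 * prepended on transmit (autodetection adds LLC_LEN back if it later
 * settles on VC-multiplexed frames).
 */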
394 pvcc->chan.mtu = atmvcc->qos.txtp.max_sdu - PPP_HDRLEN -
395 (be.encaps == e_vc ? 0 : LLC_LEN);
396 pvcc->wakeup_tasklet = tasklet_proto;
397 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
398 err = ppp_register_channel(&pvcc->chan);
399 if (err != 0) {
400 kfree(pvcc);
401 return err;
402 }
403 atmvcc->user_back = pvcc;
404 atmvcc->push = pppoatm_push;
405 atmvcc->pop = pppoatm_pop;
406 __module_get(THIS_MODULE);
407 atmvcc->owner = THIS_MODULE;
408
409 /* re-process everything received between connection setup and
410 backend setup */
411 vcc_process_recv_queue(atmvcc);
412 return 0;
413 }
414
415 /*
416 * This handles ioctls actually performed on our vcc - we must return
417 * -ENOIOCTLCMD for any unrecognized ioctl
418 */
419 static int pppoatm_ioctl(struct socket *sock, unsigned int cmd,
420 unsigned long arg)
421 {
422 struct atm_vcc *atmvcc = ATM_SD(sock);
423 void __user *argp = (void __user *)arg;
424
425 if (cmd != ATM_SETBACKEND && atmvcc->push != pppoatm_push)
426 return -ENOIOCTLCMD;
427 switch (cmd) {
428 case ATM_SETBACKEND: {
429 atm_backend_t b;
430 if (get_user(b, (atm_backend_t __user *) argp))
431 return -EFAULT;
432 if (b != ATM_BACKEND_PPP)
433 return -ENOIOCTLCMD;
434 if (!capable(CAP_NET_ADMIN))
435 return -EPERM;
436 if (sock->state != SS_CONNECTED)
437 return -EINVAL;
438 return pppoatm_assign_vcc(atmvcc, argp);
439 }
440 case PPPIOCGCHAN:
441 return put_user(ppp_channel_index(&atmvcc_to_pvcc(atmvcc)->
442 chan), (int __user *) argp) ? -EFAULT : 0;
443 case PPPIOCGUNIT:
444 return put_user(ppp_unit_number(&atmvcc_to_pvcc(atmvcc)->
445 chan), (int __user *) argp) ? -EFAULT : 0;
446 }
447 return -ENOIOCTLCMD;
448 }
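/*
 * Rough sketch of the userspace attach sequence these ioctls expect
 * (e.g. from a pppd PPPoATM plugin; illustrative only, the plugin code
 * is not part of this file):
 *
 *   fd = socket(PF_ATMPVC, SOCK_DGRAM, ATM_AAL5);
 *   set the QoS (which fixes txtp.max_sdu) and connect() the PVC;
 *   struct atm_backend_ppp be = { .backend_num = ATM_BACKEND_PPP,
 *                                 .encaps = PPPOATM_ENCAPS_VC };
 *   ioctl(fd, ATM_SETBACKEND, &be);     ends up in pppoatm_assign_vcc()
 *   ioctl(fd, PPPIOCGCHAN, &chan_idx);  channel index for /dev/ppp attach
 */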
449
450 static struct atm_ioctl pppoatm_ioctl_ops = {
451 .owner = THIS_MODULE,
452 .ioctl = pppoatm_ioctl,
453 };
454
455 static int __init pppoatm_init(void)
456 {
457 register_atm_ioctl(&pppoatm_ioctl_ops);
458 return 0;
459 }
460
461 static void __exit pppoatm_exit(void)
462 {
463 deregister_atm_ioctl(&pppoatm_ioctl_ops);
464 }
465
466 module_init(pppoatm_init);
467 module_exit(pppoatm_exit);
468
469 MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
470 MODULE_DESCRIPTION("RFC2364 PPP over ATM/AAL5");
471 MODULE_LICENSE("GPL");