1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth L2CAP core and sockets. */
26
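/* A rough picture of how userspace reaches this code: L2CAP channels are
 * plain BTPROTO_L2CAP sockets.  The sketch below is illustrative only
 * (the PSM and address are made up, error handling is omitted, and
 * htobs()/str2ba() are BlueZ userspace helpers, not part of this file):
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(sk, (struct sockaddr *) &addr, sizeof(addr));
 *
 * bind() and connect() on such a socket end up in l2cap_sock_bind() and
 * l2cap_sock_connect() below.
 */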
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/capability.h>
31#include <linux/errno.h>
32#include <linux/kernel.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/fcntl.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/socket.h>
40#include <linux/skbuff.h>
41#include <linux/list.h>
42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
45#include <linux/uaccess.h>
46#include <linux/crc16.h>
47#include <net/sock.h>
48
49#include <asm/system.h>
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54#include <net/bluetooth/l2cap.h>
55
56#define VERSION "2.14"
57
58static int enable_ertm = 0;
59static int max_transmit = L2CAP_DEFAULT_MAX_TX;
60
61static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62static u8 l2cap_fixed_chan[8] = { 0x02, };
63
64static const struct proto_ops l2cap_sock_ops;
65
66static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68};
69
70static void __l2cap_sock_close(struct sock *sk, int reason);
71static void l2cap_sock_close(struct sock *sk);
72static void l2cap_sock_kill(struct sock *sk);
73
74static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
76
77/* ---- L2CAP timers ---- */
78static void l2cap_sock_timeout(unsigned long arg)
79{
80 struct sock *sk = (struct sock *) arg;
81 int reason;
82
83 BT_DBG("sock %p state %d", sk, sk->sk_state);
84
85 bh_lock_sock(sk);
86
87 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
88 reason = ECONNREFUSED;
89 else if (sk->sk_state == BT_CONNECT &&
90 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
91 reason = ECONNREFUSED;
92 else
93 reason = ETIMEDOUT;
94
95 __l2cap_sock_close(sk, reason);
96
97 bh_unlock_sock(sk);
98
99 l2cap_sock_kill(sk);
100 sock_put(sk);
101}
102
103static void l2cap_sock_set_timer(struct sock *sk, long timeout)
104{
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
107}
108
109static void l2cap_sock_clear_timer(struct sock *sk)
110{
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
113}
114
115/* ---- L2CAP channels ---- */
116static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
117{
118 struct sock *s;
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
121 break;
122 }
123 return s;
124}
125
126static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
127{
128 struct sock *s;
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
131 break;
132 }
133 return s;
134}
135
136/* Find channel with given SCID.
137 * Returns locked socket */
138static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139{
140 struct sock *s;
141 read_lock(&l->lock);
142 s = __l2cap_get_chan_by_scid(l, cid);
143 if (s)
144 bh_lock_sock(s);
145 read_unlock(&l->lock);
146 return s;
147}
148
149static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
150{
151 struct sock *s;
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
154 break;
155 }
156 return s;
157}
158
159static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160{
161 struct sock *s;
162 read_lock(&l->lock);
163 s = __l2cap_get_chan_by_ident(l, ident);
164 if (s)
165 bh_lock_sock(s);
166 read_unlock(&l->lock);
167 return s;
168}
169
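/* Pick the first unused dynamic CID in [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).
 * Called with the channel list lock held; returns 0 if the range is exhausted. */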
170static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
171{
172 u16 cid = L2CAP_CID_DYN_START;
173
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
176 return cid;
177 }
178
179 return 0;
180}
181
182static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
183{
184 sock_hold(sk);
185
186 if (l->head)
187 l2cap_pi(l->head)->prev_c = sk;
188
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
191 l->head = sk;
192}
193
194static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
195{
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
197
198 write_lock_bh(&l->lock);
199 if (sk == l->head)
200 l->head = next;
201
202 if (next)
203 l2cap_pi(next)->prev_c = prev;
204 if (prev)
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
207
208 __sock_put(sk);
209}
210
211static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
212{
213 struct l2cap_chan_list *l = &conn->chan_list;
214
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
217
218 conn->disc_reason = 0x13;
219
220 l2cap_pi(sk)->conn = conn;
221
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
230 } else {
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
235 }
236
237 __l2cap_chan_link(l, sk);
238
239 if (parent)
240 bt_accept_enqueue(parent, sk);
241}
242
243/* Delete channel.
244 * Must be called on the locked socket. */
245static void l2cap_chan_del(struct sock *sk, int err)
246{
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
249
250 l2cap_sock_clear_timer(sk);
251
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
253
254 if (conn) {
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
259 }
260
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
263
264 if (err)
265 sk->sk_err = err;
266
267 if (parent) {
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
270 } else
271 sk->sk_state_change(sk);
272}
273
274/* Service level security */
275static inline int l2cap_check_security(struct sock *sk)
276{
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
278 __u8 auth_type;
279
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
283 else
284 auth_type = HCI_AT_NO_BONDING;
285
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
288 } else {
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
292 break;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
295 break;
296 default:
297 auth_type = HCI_AT_NO_BONDING;
298 break;
299 }
300 }
301
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
303 auth_type);
304}
305
306static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
307{
308 u8 id;
309
 310	/* Get next available identifier.

311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
314 */
315
316 spin_lock_bh(&conn->lock);
317
318 if (++conn->tx_ident > 128)
319 conn->tx_ident = 1;
320
321 id = conn->tx_ident;
322
323 spin_unlock_bh(&conn->lock);
324
325 return id;
326}
327
328static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
329{
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
331
332 BT_DBG("code 0x%2.2x", code);
333
334 if (!skb)
335 return -ENOMEM;
336
337 return hci_send_acl(conn->hcon, skb, 0);
338}
339
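/* Build and send a single S-frame (supervisory frame): basic L2CAP header,
 * 16-bit control field and, when FCS is enabled on the channel, a trailing
 * CRC-16 computed over header and control. */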
340static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
341{
342 struct sk_buff *skb;
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
346
347 if (pi->fcs == L2CAP_FCS_CRC16)
348 hlen += 2;
349
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
351
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
354
355 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 if (!skb)
357 return -ENOMEM;
358
359 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
360 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
361 lh->cid = cpu_to_le16(pi->dcid);
362 put_unaligned_le16(control, skb_put(skb, 2));
363
364 if (pi->fcs == L2CAP_FCS_CRC16) {
365 u16 fcs = crc16(0, (u8 *)lh, count - 2);
366 put_unaligned_le16(fcs, skb_put(skb, 2));
367 }
368
369 return hci_send_acl(pi->conn->hcon, skb, 0);
370}
371
372static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
373{
374 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
375 control |= L2CAP_SUPER_RCV_NOT_READY;
376 else
377 control |= L2CAP_SUPER_RCV_READY;
378
379 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
380
381 return l2cap_send_sframe(pi, control);
382}
383
384static void l2cap_do_start(struct sock *sk)
385{
386 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
387
388 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
389 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
390 return;
391
392 if (l2cap_check_security(sk)) {
393 struct l2cap_conn_req req;
394 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
395 req.psm = l2cap_pi(sk)->psm;
396
397 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
398
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_REQ, sizeof(req), &req);
401 }
402 } else {
403 struct l2cap_info_req req;
404 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
405
406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
407 conn->info_ident = l2cap_get_ident(conn);
408
409 mod_timer(&conn->info_timer, jiffies +
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
411
412 l2cap_send_cmd(conn, conn->info_ident,
413 L2CAP_INFO_REQ, sizeof(req), &req);
414 }
415}
416
417static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
418{
419 struct l2cap_disconn_req req;
420
421 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
422 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
423 l2cap_send_cmd(conn, l2cap_get_ident(conn),
424 L2CAP_DISCONN_REQ, sizeof(req), &req);
425}
426
427/* ---- L2CAP connections ---- */
428static void l2cap_conn_start(struct l2cap_conn *conn)
429{
430 struct l2cap_chan_list *l = &conn->chan_list;
431 struct sock *sk;
432
433 BT_DBG("conn %p", conn);
434
435 read_lock(&l->lock);
436
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
438 bh_lock_sock(sk);
439
440 if (sk->sk_type != SOCK_SEQPACKET) {
441 bh_unlock_sock(sk);
442 continue;
443 }
444
445 if (sk->sk_state == BT_CONNECT) {
446 if (l2cap_check_security(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
450
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
452
453 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
454 L2CAP_CONN_REQ, sizeof(req), &req);
455 }
456 } else if (sk->sk_state == BT_CONNECT2) {
457 struct l2cap_conn_rsp rsp;
458 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
459 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
460
461 if (l2cap_check_security(sk)) {
462 if (bt_sk(sk)->defer_setup) {
463 struct sock *parent = bt_sk(sk)->parent;
464 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
465 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
466 parent->sk_data_ready(parent, 0);
467
468 } else {
469 sk->sk_state = BT_CONFIG;
470 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
471 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
472 }
473 } else {
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
476 }
477
478 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
479 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
480 }
481
482 bh_unlock_sock(sk);
483 }
484
485 read_unlock(&l->lock);
486}
487
488static void l2cap_conn_ready(struct l2cap_conn *conn)
489{
490 struct l2cap_chan_list *l = &conn->chan_list;
491 struct sock *sk;
492
493 BT_DBG("conn %p", conn);
494
495 read_lock(&l->lock);
496
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
498 bh_lock_sock(sk);
499
500 if (sk->sk_type != SOCK_SEQPACKET) {
501 l2cap_sock_clear_timer(sk);
502 sk->sk_state = BT_CONNECTED;
503 sk->sk_state_change(sk);
504 } else if (sk->sk_state == BT_CONNECT)
505 l2cap_do_start(sk);
506
507 bh_unlock_sock(sk);
508 }
509
510 read_unlock(&l->lock);
511}
512
 513/* Notify sockets that we cannot guarantee reliability anymore */
514static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
515{
516 struct l2cap_chan_list *l = &conn->chan_list;
517 struct sock *sk;
518
519 BT_DBG("conn %p", conn);
520
521 read_lock(&l->lock);
522
523 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
524 if (l2cap_pi(sk)->force_reliable)
525 sk->sk_err = err;
526 }
527
528 read_unlock(&l->lock);
529}
530
531static void l2cap_info_timeout(unsigned long arg)
532{
533 struct l2cap_conn *conn = (void *) arg;
534
535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
536 conn->info_ident = 0;
537
538 l2cap_conn_start(conn);
539}
540
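/* Allocate and initialise per-ACL-link L2CAP state the first time a channel
 * is created on this hci_conn.  Returns the existing conn if one is already
 * attached, and NULL on allocation failure or when the ACL connection itself
 * failed (non-zero status). */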
541static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
542{
543 struct l2cap_conn *conn = hcon->l2cap_data;
544
545 if (conn || status)
546 return conn;
547
548 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
549 if (!conn)
550 return NULL;
551
552 hcon->l2cap_data = conn;
553 conn->hcon = hcon;
554
555 BT_DBG("hcon %p conn %p", hcon, conn);
556
557 conn->mtu = hcon->hdev->acl_mtu;
558 conn->src = &hcon->hdev->bdaddr;
559 conn->dst = &hcon->dst;
560
561 conn->feat_mask = 0;
562
563 spin_lock_init(&conn->lock);
564 rwlock_init(&conn->chan_list.lock);
565
566 setup_timer(&conn->info_timer, l2cap_info_timeout,
567 (unsigned long) conn);
568
569 conn->disc_reason = 0x13;
570
571 return conn;
572}
573
574static void l2cap_conn_del(struct hci_conn *hcon, int err)
575{
576 struct l2cap_conn *conn = hcon->l2cap_data;
577 struct sock *sk;
578
579 if (!conn)
580 return;
581
582 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
583
584 kfree_skb(conn->rx_skb);
585
586 /* Kill channels */
587 while ((sk = conn->chan_list.head)) {
588 bh_lock_sock(sk);
589 l2cap_chan_del(sk, err);
590 bh_unlock_sock(sk);
591 l2cap_sock_kill(sk);
592 }
593
594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
595 del_timer_sync(&conn->info_timer);
596
597 hcon->l2cap_data = NULL;
598 kfree(conn);
599}
600
601static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
602{
603 struct l2cap_chan_list *l = &conn->chan_list;
604 write_lock_bh(&l->lock);
605 __l2cap_chan_add(conn, sk, parent);
606 write_unlock_bh(&l->lock);
607}
608
609/* ---- Socket interface ---- */
610static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
611{
612 struct sock *sk;
613 struct hlist_node *node;
614 sk_for_each(sk, node, &l2cap_sk_list.head)
615 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
616 goto found;
617 sk = NULL;
618found:
619 return sk;
620}
621
622/* Find socket with psm and source bdaddr.
623 * Returns closest match.
624 */
625static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
626{
627 struct sock *sk = NULL, *sk1 = NULL;
628 struct hlist_node *node;
629
630 sk_for_each(sk, node, &l2cap_sk_list.head) {
631 if (state && sk->sk_state != state)
632 continue;
633
634 if (l2cap_pi(sk)->psm == psm) {
635 /* Exact match. */
636 if (!bacmp(&bt_sk(sk)->src, src))
637 break;
638
639 /* Closest match */
640 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
641 sk1 = sk;
642 }
643 }
644 return node ? sk : sk1;
645}
646
647/* Find socket with given address (psm, src).
648 * Returns locked socket */
649static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
650{
651 struct sock *s;
652 read_lock(&l2cap_sk_list.lock);
653 s = __l2cap_get_sock_by_psm(state, psm, src);
654 if (s)
655 bh_lock_sock(s);
656 read_unlock(&l2cap_sk_list.lock);
657 return s;
658}
659
660static void l2cap_sock_destruct(struct sock *sk)
661{
662 BT_DBG("sk %p", sk);
663
664 skb_queue_purge(&sk->sk_receive_queue);
665 skb_queue_purge(&sk->sk_write_queue);
666}
667
668static void l2cap_sock_cleanup_listen(struct sock *parent)
669{
670 struct sock *sk;
671
672 BT_DBG("parent %p", parent);
673
674 /* Close not yet accepted channels */
675 while ((sk = bt_accept_dequeue(parent, NULL)))
676 l2cap_sock_close(sk);
677
678 parent->sk_state = BT_CLOSED;
679 sock_set_flag(parent, SOCK_ZAPPED);
680}
681
682/* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
684 */
685static void l2cap_sock_kill(struct sock *sk)
686{
687 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
688 return;
689
690 BT_DBG("sk %p state %d", sk, sk->sk_state);
691
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list, sk);
694 sock_set_flag(sk, SOCK_DEAD);
695 sock_put(sk);
696}
697
698static void __l2cap_sock_close(struct sock *sk, int reason)
699{
700 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
701
702 switch (sk->sk_state) {
703 case BT_LISTEN:
704 l2cap_sock_cleanup_listen(sk);
705 break;
706
707 case BT_CONNECTED:
708 case BT_CONFIG:
709 if (sk->sk_type == SOCK_SEQPACKET) {
710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
711
712 sk->sk_state = BT_DISCONN;
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
714 l2cap_send_disconn_req(conn, sk);
715 } else
716 l2cap_chan_del(sk, reason);
717 break;
718
719 case BT_CONNECT2:
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 struct l2cap_conn_rsp rsp;
723 __u16 result;
724
725 if (bt_sk(sk)->defer_setup)
726 result = L2CAP_CR_SEC_BLOCK;
727 else
728 result = L2CAP_CR_BAD_PSM;
729
730 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
731 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
732 rsp.result = cpu_to_le16(result);
733 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
734 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
735 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
736 } else
737 l2cap_chan_del(sk, reason);
738 break;
739
740 case BT_CONNECT:
741 case BT_DISCONN:
742 l2cap_chan_del(sk, reason);
743 break;
744
745 default:
746 sock_set_flag(sk, SOCK_ZAPPED);
747 break;
748 }
749}
750
751/* Must be called on unlocked socket. */
752static void l2cap_sock_close(struct sock *sk)
753{
754 l2cap_sock_clear_timer(sk);
755 lock_sock(sk);
756 __l2cap_sock_close(sk, ECONNRESET);
757 release_sock(sk);
758 l2cap_sock_kill(sk);
759}
760
761static void l2cap_sock_init(struct sock *sk, struct sock *parent)
762{
763 struct l2cap_pinfo *pi = l2cap_pi(sk);
764
765 BT_DBG("sk %p", sk);
766
767 if (parent) {
768 sk->sk_type = parent->sk_type;
769 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
770
771 pi->imtu = l2cap_pi(parent)->imtu;
772 pi->omtu = l2cap_pi(parent)->omtu;
773 pi->mode = l2cap_pi(parent)->mode;
774 pi->fcs = l2cap_pi(parent)->fcs;
775 pi->sec_level = l2cap_pi(parent)->sec_level;
776 pi->role_switch = l2cap_pi(parent)->role_switch;
777 pi->force_reliable = l2cap_pi(parent)->force_reliable;
778 } else {
779 pi->imtu = L2CAP_DEFAULT_MTU;
780 pi->omtu = 0;
781 pi->mode = L2CAP_MODE_BASIC;
782 pi->fcs = L2CAP_FCS_CRC16;
783 pi->sec_level = BT_SECURITY_LOW;
784 pi->role_switch = 0;
785 pi->force_reliable = 0;
786 }
787
788 /* Default config options */
789 pi->conf_len = 0;
790 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
791 skb_queue_head_init(TX_QUEUE(sk));
792 skb_queue_head_init(SREJ_QUEUE(sk));
793 INIT_LIST_HEAD(SREJ_LIST(sk));
794}
795
796static struct proto l2cap_proto = {
797 .name = "L2CAP",
798 .owner = THIS_MODULE,
799 .obj_size = sizeof(struct l2cap_pinfo)
800};
801
802static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
803{
804 struct sock *sk;
805
806 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
807 if (!sk)
808 return NULL;
809
810 sock_init_data(sock, sk);
811 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
812
813 sk->sk_destruct = l2cap_sock_destruct;
814 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
815
816 sock_reset_flag(sk, SOCK_ZAPPED);
817
818 sk->sk_protocol = proto;
819 sk->sk_state = BT_OPEN;
820
821 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
822
823 bt_sock_link(&l2cap_sk_list, sk);
824 return sk;
825}
826
827static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
828 int kern)
829{
830 struct sock *sk;
831
832 BT_DBG("sock %p", sock);
833
834 sock->state = SS_UNCONNECTED;
835
836 if (sock->type != SOCK_SEQPACKET &&
837 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
838 return -ESOCKTNOSUPPORT;
839
840 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
841 return -EPERM;
842
843 sock->ops = &l2cap_sock_ops;
844
845 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
846 if (!sk)
847 return -ENOMEM;
848
849 l2cap_sock_init(sk, NULL);
850 return 0;
851}
852
853static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
854{
855 struct sock *sk = sock->sk;
856 struct sockaddr_l2 la;
857 int len, err = 0;
858
859 BT_DBG("sk %p", sk);
860
861 if (!addr || addr->sa_family != AF_BLUETOOTH)
862 return -EINVAL;
863
864 memset(&la, 0, sizeof(la));
865 len = min_t(unsigned int, sizeof(la), alen);
866 memcpy(&la, addr, len);
867
868 if (la.l2_cid)
869 return -EINVAL;
870
871 lock_sock(sk);
872
873 if (sk->sk_state != BT_OPEN) {
874 err = -EBADFD;
875 goto done;
876 }
877
878 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE)) {
880 err = -EACCES;
881 goto done;
882 }
883
884 write_lock_bh(&l2cap_sk_list.lock);
885
886 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
887 err = -EADDRINUSE;
888 } else {
889 /* Save source address */
890 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
891 l2cap_pi(sk)->psm = la.l2_psm;
892 l2cap_pi(sk)->sport = la.l2_psm;
893 sk->sk_state = BT_BOUND;
894
895 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
896 __le16_to_cpu(la.l2_psm) == 0x0003)
897 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
898 }
899
900 write_unlock_bh(&l2cap_sk_list.lock);
901
902done:
903 release_sock(sk);
904 return err;
905}
906
907static int l2cap_do_connect(struct sock *sk)
908{
909 bdaddr_t *src = &bt_sk(sk)->src;
910 bdaddr_t *dst = &bt_sk(sk)->dst;
911 struct l2cap_conn *conn;
912 struct hci_conn *hcon;
913 struct hci_dev *hdev;
914 __u8 auth_type;
915 int err;
916
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
918 l2cap_pi(sk)->psm);
919
920 hdev = hci_get_route(dst, src);
921 if (!hdev)
922 return -EHOSTUNREACH;
923
924 hci_dev_lock_bh(hdev);
925
926 err = -ENOMEM;
927
928 if (sk->sk_type == SOCK_RAW) {
929 switch (l2cap_pi(sk)->sec_level) {
930 case BT_SECURITY_HIGH:
931 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
932 break;
933 case BT_SECURITY_MEDIUM:
934 auth_type = HCI_AT_DEDICATED_BONDING;
935 break;
936 default:
937 auth_type = HCI_AT_NO_BONDING;
938 break;
939 }
940 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
942 auth_type = HCI_AT_NO_BONDING_MITM;
943 else
944 auth_type = HCI_AT_NO_BONDING;
945
946 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
947 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
948 } else {
949 switch (l2cap_pi(sk)->sec_level) {
950 case BT_SECURITY_HIGH:
951 auth_type = HCI_AT_GENERAL_BONDING_MITM;
952 break;
953 case BT_SECURITY_MEDIUM:
954 auth_type = HCI_AT_GENERAL_BONDING;
955 break;
956 default:
957 auth_type = HCI_AT_NO_BONDING;
958 break;
959 }
960 }
961
962 hcon = hci_connect(hdev, ACL_LINK, dst,
963 l2cap_pi(sk)->sec_level, auth_type);
964 if (!hcon)
965 goto done;
966
967 conn = l2cap_conn_add(hcon, 0);
968 if (!conn) {
969 hci_conn_put(hcon);
970 goto done;
971 }
972
973 err = 0;
974
975 /* Update source addr of the socket */
976 bacpy(src, conn->src);
977
978 l2cap_chan_add(conn, sk, NULL);
979
980 sk->sk_state = BT_CONNECT;
981 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
982
983 if (hcon->state == BT_CONNECTED) {
984 if (sk->sk_type != SOCK_SEQPACKET) {
985 l2cap_sock_clear_timer(sk);
986 sk->sk_state = BT_CONNECTED;
987 } else
988 l2cap_do_start(sk);
989 }
990
991done:
992 hci_dev_unlock_bh(hdev);
993 hci_dev_put(hdev);
994 return err;
995}
996
997static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
998{
999 struct sock *sk = sock->sk;
1000 struct sockaddr_l2 la;
1001 int len, err = 0;
1002
1003 BT_DBG("sk %p", sk);
1004
1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1007 return -EINVAL;
1008
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1012
1013 if (la.l2_cid)
1014 return -EINVAL;
1015
1016 lock_sock(sk);
1017
1018 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1019 err = -EINVAL;
1020 goto done;
1021 }
1022
1023 switch (l2cap_pi(sk)->mode) {
1024 case L2CAP_MODE_BASIC:
1025 break;
1026 case L2CAP_MODE_ERTM:
1027 case L2CAP_MODE_STREAMING:
1028 if (enable_ertm)
1029 break;
1030 /* fall through */
1031 default:
1032 err = -ENOTSUPP;
1033 goto done;
1034 }
1035
1036 switch (sk->sk_state) {
1037 case BT_CONNECT:
1038 case BT_CONNECT2:
1039 case BT_CONFIG:
1040 /* Already connecting */
1041 goto wait;
1042
1043 case BT_CONNECTED:
1044 /* Already connected */
1045 goto done;
1046
1047 case BT_OPEN:
1048 case BT_BOUND:
1049 /* Can connect */
1050 break;
1051
1052 default:
1053 err = -EBADFD;
1054 goto done;
1055 }
1056
1057 /* Set destination address and psm */
1058 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1059 l2cap_pi(sk)->psm = la.l2_psm;
1060
1061 err = l2cap_do_connect(sk);
1062 if (err)
1063 goto done;
1064
1065wait:
1066 err = bt_sock_wait_state(sk, BT_CONNECTED,
1067 sock_sndtimeo(sk, flags & O_NONBLOCK));
1068done:
1069 release_sock(sk);
1070 return err;
1071}
1072
1073static int l2cap_sock_listen(struct socket *sock, int backlog)
1074{
1075 struct sock *sk = sock->sk;
1076 int err = 0;
1077
1078 BT_DBG("sk %p backlog %d", sk, backlog);
1079
1080 lock_sock(sk);
1081
1082 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1083 err = -EBADFD;
1084 goto done;
1085 }
1086
1087 switch (l2cap_pi(sk)->mode) {
1088 case L2CAP_MODE_BASIC:
1089 break;
1090 case L2CAP_MODE_ERTM:
1091 case L2CAP_MODE_STREAMING:
1092 if (enable_ertm)
1093 break;
1094 /* fall through */
1095 default:
1096 err = -ENOTSUPP;
1097 goto done;
1098 }
1099
1100 if (!l2cap_pi(sk)->psm) {
1101 bdaddr_t *src = &bt_sk(sk)->src;
1102 u16 psm;
1103
1104 err = -EINVAL;
1105
1106 write_lock_bh(&l2cap_sk_list.lock);
1107
1108 for (psm = 0x1001; psm < 0x1100; psm += 2)
1109 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1110 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1111 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1112 err = 0;
1113 break;
1114 }
1115
1116 write_unlock_bh(&l2cap_sk_list.lock);
1117
1118 if (err < 0)
1119 goto done;
1120 }
1121
1122 sk->sk_max_ack_backlog = backlog;
1123 sk->sk_ack_backlog = 0;
1124 sk->sk_state = BT_LISTEN;
1125
1126done:
1127 release_sock(sk);
1128 return err;
1129}
1130
1131static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1132{
1133 DECLARE_WAITQUEUE(wait, current);
1134 struct sock *sk = sock->sk, *nsk;
1135 long timeo;
1136 int err = 0;
1137
1138 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1139
1140 if (sk->sk_state != BT_LISTEN) {
1141 err = -EBADFD;
1142 goto done;
1143 }
1144
1145 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1146
1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1148
1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE);
1153 if (!timeo) {
1154 err = -EAGAIN;
1155 break;
1156 }
1157
1158 release_sock(sk);
1159 timeo = schedule_timeout(timeo);
1160 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1161
1162 if (sk->sk_state != BT_LISTEN) {
1163 err = -EBADFD;
1164 break;
1165 }
1166
1167 if (signal_pending(current)) {
1168 err = sock_intr_errno(timeo);
1169 break;
1170 }
1171 }
1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk_sleep(sk), &wait);
1174
1175 if (err)
1176 goto done;
1177
1178 newsock->state = SS_CONNECTED;
1179
1180 BT_DBG("new socket %p", nsk);
1181
1182done:
1183 release_sock(sk);
1184 return err;
1185}
1186
1187static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1188{
1189 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1190 struct sock *sk = sock->sk;
1191
1192 BT_DBG("sock %p, sk %p", sock, sk);
1193
1194 addr->sa_family = AF_BLUETOOTH;
1195 *len = sizeof(struct sockaddr_l2);
1196
1197 if (peer) {
1198 la->l2_psm = l2cap_pi(sk)->psm;
1199 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1200 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1201 } else {
1202 la->l2_psm = l2cap_pi(sk)->sport;
1203 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1204 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1205 }
1206
1207 return 0;
1208}
1209
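/* ERTM monitor timer: while waiting for the peer to answer a poll, re-send
 * an RR/RNR S-frame with the P bit set, up to remote_max_tx attempts; after
 * that the channel is disconnected. */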
1210static void l2cap_monitor_timeout(unsigned long arg)
1211{
1212 struct sock *sk = (void *) arg;
1213 u16 control;
1214
1215 bh_lock_sock(sk);
1216 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1217 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1218 bh_unlock_sock(sk);
1219 return;
1220 }
1221
1222 l2cap_pi(sk)->retry_count++;
1223 __mod_monitor_timer();
1224
1225 control = L2CAP_CTRL_POLL;
1226 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1227 bh_unlock_sock(sk);
1228}
1229
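/* ERTM retransmission timer: the peer did not acknowledge in time, so poll
 * it (RR/RNR with the P bit), arm the monitor timer and block further
 * I-frame transmission until the final response arrives (L2CAP_CONN_WAIT_F). */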
1230static void l2cap_retrans_timeout(unsigned long arg)
1231{
1232 struct sock *sk = (void *) arg;
1233 u16 control;
1234
1235 bh_lock_sock(sk);
1236 l2cap_pi(sk)->retry_count = 1;
1237 __mod_monitor_timer();
1238
1239 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1240
1241 control = L2CAP_CTRL_POLL;
1242 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1243 bh_unlock_sock(sk);
1244}
1245
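/* Drop frames at the head of the TX queue that the peer has acknowledged,
 * i.e. everything before expected_ack_seq, and stop the retransmission timer
 * once no unacked frames remain. */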
1246static void l2cap_drop_acked_frames(struct sock *sk)
1247{
1248 struct sk_buff *skb;
1249
1250 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1251 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1252 break;
1253
1254 skb = skb_dequeue(TX_QUEUE(sk));
1255 kfree_skb(skb);
1256
1257 l2cap_pi(sk)->unacked_frames--;
1258 }
1259
1260 if (!l2cap_pi(sk)->unacked_frames)
1261 del_timer(&l2cap_pi(sk)->retrans_timer);
1262
1263 return;
1264}
1265
1266static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1267{
1268 struct l2cap_pinfo *pi = l2cap_pi(sk);
1269 int err;
1270
1271 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1272
1273 err = hci_send_acl(pi->conn->hcon, skb, 0);
1274 if (err < 0)
1275 kfree_skb(skb);
1276
1277 return err;
1278}
1279
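/* Streaming mode TX path: stamp each queued I-frame with the next TX
 * sequence number (modulo 64), append the CRC-16 FCS when enabled and push
 * it out.  Frames are freed immediately; streaming mode never retransmits. */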
1280static int l2cap_streaming_send(struct sock *sk)
1281{
1282 struct sk_buff *skb, *tx_skb;
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1284 u16 control, fcs;
1285 int err;
1286
1287 while ((skb = sk->sk_send_head)) {
1288 tx_skb = skb_clone(skb, GFP_ATOMIC);
1289
1290 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1291 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1292 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1293
1294 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1295 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1296 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1297 }
1298
1299 err = l2cap_do_send(sk, tx_skb);
1300 if (err < 0) {
1301 l2cap_send_disconn_req(pi->conn, sk);
1302 return err;
1303 }
1304
1305 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1306
1307 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1308 sk->sk_send_head = NULL;
1309 else
1310 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1311
1312 skb = skb_dequeue(TX_QUEUE(sk));
1313 kfree_skb(skb);
1314 }
1315 return 0;
1316}
1317
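/* Retransmit the queued I-frame carrying the given TX sequence number
 * (typically in response to a REJ/SREJ from the peer).  Disconnects the
 * channel once the frame has already been sent remote_max_tx times. */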
1318static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1319{
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb, *tx_skb;
1322 u16 control, fcs;
1323 int err;
1324
1325 skb = skb_peek(TX_QUEUE(sk));
1326 do {
1327 if (bt_cb(skb)->tx_seq != tx_seq) {
1328 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1329 break;
1330 skb = skb_queue_next(TX_QUEUE(sk), skb);
1331 continue;
1332 }
1333
1334 if (pi->remote_max_tx &&
1335 bt_cb(skb)->retries == pi->remote_max_tx) {
1336 l2cap_send_disconn_req(pi->conn, sk);
1337 break;
1338 }
1339
1340 tx_skb = skb_clone(skb, GFP_ATOMIC);
1341 bt_cb(skb)->retries++;
1342 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1343 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1344 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1345 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1346
1347 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1348 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1349 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1350 }
1351
1352 err = l2cap_do_send(sk, tx_skb);
1353 if (err < 0) {
1354 l2cap_send_disconn_req(pi->conn, sk);
1355 return err;
1356 }
1357 break;
1358	} while (1);
1359 return 0;
1360}
1361
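/* ERTM TX path: transmit queued I-frames while the TX window has room and
 * the remote side is not busy.  Each frame carries the current ReqSeq
 * (buffer_seq) and TxSeq, gets the FCS appended when enabled, bumps
 * unacked_frames and (re)arms the retransmission timer.  Nothing is sent
 * while we are waiting for a final (F bit) response. */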
1362static int l2cap_ertm_send(struct sock *sk)
1363{
1364 struct sk_buff *skb, *tx_skb;
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1366 u16 control, fcs;
1367 int err;
1368
1369 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1370 return 0;
1371
1372 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1373 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1374
1375 if (pi->remote_max_tx &&
1376 bt_cb(skb)->retries == pi->remote_max_tx) {
1377 l2cap_send_disconn_req(pi->conn, sk);
1378 break;
1379 }
1380
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382
1383 bt_cb(skb)->retries++;
1384
1385 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1386 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1387 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1388 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1389
1390
1391 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1392			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1393			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1394 }
1395
1396 err = l2cap_do_send(sk, tx_skb);
1397 if (err < 0) {
1398 l2cap_send_disconn_req(pi->conn, sk);
1399 return err;
1400 }
1401 __mod_retrans_timer();
1402
1403 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1404 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1405
1406 pi->unacked_frames++;
1407
1408 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1409 sk->sk_send_head = NULL;
1410 else
1411 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1412 }
1413
1414 return 0;
1415}
1416
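/* Copy the user iovec into an skb: the first 'count' bytes go into the main
 * buffer, the remainder into a chain of fragment skbs no larger than the ACL
 * MTU each.  Returns the number of bytes copied or a negative error. */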
1417static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1418{
1419 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1420 struct sk_buff **frag;
1421 int err, sent = 0;
1422
1423 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1424 return -EFAULT;
1425 }
1426
1427 sent += count;
1428 len -= count;
1429
1430 /* Continuation fragments (no L2CAP header) */
1431 frag = &skb_shinfo(skb)->frag_list;
1432 while (len) {
1433 count = min_t(unsigned int, conn->mtu, len);
1434
1435 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1436 if (!*frag)
1437 return -EFAULT;
1438 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1439 return -EFAULT;
1440
1441 sent += count;
1442 len -= count;
1443
1444 frag = &(*frag)->next;
1445 }
1446
1447 return sent;
1448}
1449
1450static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1451{
1452 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1453 struct sk_buff *skb;
1454 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1455 struct l2cap_hdr *lh;
1456
1457 BT_DBG("sk %p len %d", sk, (int)len);
1458
1459 count = min_t(unsigned int, (conn->mtu - hlen), len);
1460 skb = bt_skb_send_alloc(sk, count + hlen,
1461 msg->msg_flags & MSG_DONTWAIT, &err);
1462 if (!skb)
1463 return ERR_PTR(-ENOMEM);
1464
1465 /* Create L2CAP header */
1466 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1467 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1468 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1469 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1470
1471 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1472 if (unlikely(err < 0)) {
1473 kfree_skb(skb);
1474 return ERR_PTR(err);
1475 }
1476 return skb;
1477}
1478
1479static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1480{
1481 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1482 struct sk_buff *skb;
1483 int err, count, hlen = L2CAP_HDR_SIZE;
1484 struct l2cap_hdr *lh;
1485
1486 BT_DBG("sk %p len %d", sk, (int)len);
1487
1488 count = min_t(unsigned int, (conn->mtu - hlen), len);
1489 skb = bt_skb_send_alloc(sk, count + hlen,
1490 msg->msg_flags & MSG_DONTWAIT, &err);
1491 if (!skb)
1492 return ERR_PTR(-ENOMEM);
1493
1494 /* Create L2CAP header */
1495 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1496 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1497 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1498
1499 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1500 if (unlikely(err < 0)) {
1501 kfree_skb(skb);
1502 return ERR_PTR(err);
1503 }
1504 return skb;
1505}
1506
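/* Build a single ERTM/streaming I-frame PDU: basic L2CAP header, 16-bit
 * control field, an optional SDU length field (only on the first segment of
 * a segmented SDU) and room for the CRC-16 FCS, which is filled in later on
 * the TX path. */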
1507static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1508{
1509 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1510 struct sk_buff *skb;
1511 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1512 struct l2cap_hdr *lh;
1513
1514 BT_DBG("sk %p len %d", sk, (int)len);
1515
1516 if (sdulen)
1517 hlen += 2;
1518
1519 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1520 hlen += 2;
1521
1522 count = min_t(unsigned int, (conn->mtu - hlen), len);
1523 skb = bt_skb_send_alloc(sk, count + hlen,
1524 msg->msg_flags & MSG_DONTWAIT, &err);
1525 if (!skb)
1526 return ERR_PTR(-ENOMEM);
1527
1528 /* Create L2CAP header */
1529 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1530 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1531 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1532 put_unaligned_le16(control, skb_put(skb, 2));
1533 if (sdulen)
1534 put_unaligned_le16(sdulen, skb_put(skb, 2));
1535
1536 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1537 if (unlikely(err < 0)) {
1538 kfree_skb(skb);
1539 return ERR_PTR(err);
1540 }
1541
1542 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1543 put_unaligned_le16(0, skb_put(skb, 2));
1544
1545 bt_cb(skb)->retries = 0;
1546 return skb;
1547}
1548
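/* Segment an SDU larger than max_pdu_size into a START frame carrying the
 * total SDU length, followed by CONTINUE frames and a final END frame, and
 * splice the result onto the channel TX queue.  Returns the number of bytes
 * queued. */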
1549static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1550{
1551 struct l2cap_pinfo *pi = l2cap_pi(sk);
1552 struct sk_buff *skb;
1553 struct sk_buff_head sar_queue;
1554 u16 control;
1555 size_t size = 0;
1556
1557 __skb_queue_head_init(&sar_queue);
1558 control = L2CAP_SDU_START;
1559 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1560 if (IS_ERR(skb))
1561 return PTR_ERR(skb);
1562
1563 __skb_queue_tail(&sar_queue, skb);
1564 len -= pi->max_pdu_size;
1565	size += pi->max_pdu_size;
1566 control = 0;
1567
1568 while (len > 0) {
1569 size_t buflen;
1570
1571 if (len > pi->max_pdu_size) {
1572 control |= L2CAP_SDU_CONTINUE;
1573 buflen = pi->max_pdu_size;
1574 } else {
1575 control |= L2CAP_SDU_END;
1576 buflen = len;
1577 }
1578
1579 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1580 if (IS_ERR(skb)) {
1581 skb_queue_purge(&sar_queue);
1582 return PTR_ERR(skb);
1583 }
1584
1585 __skb_queue_tail(&sar_queue, skb);
1586 len -= buflen;
1587 size += buflen;
1588 control = 0;
1589 }
1590 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1591 if (sk->sk_send_head == NULL)
1592 sk->sk_send_head = sar_queue.next;
1593
1594 return size;
1595}
1596
1597static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1598{
1599 struct sock *sk = sock->sk;
1600 struct l2cap_pinfo *pi = l2cap_pi(sk);
1601 struct sk_buff *skb;
1602 u16 control;
1603 int err;
1604
1605 BT_DBG("sock %p, sk %p", sock, sk);
1606
1607 err = sock_error(sk);
1608 if (err)
1609 return err;
1610
1611 if (msg->msg_flags & MSG_OOB)
1612 return -EOPNOTSUPP;
1613
1614 lock_sock(sk);
1615
1616 if (sk->sk_state != BT_CONNECTED) {
1617 err = -ENOTCONN;
1618 goto done;
1619 }
1620
1621 /* Connectionless channel */
1622 if (sk->sk_type == SOCK_DGRAM) {
1623 skb = l2cap_create_connless_pdu(sk, msg, len);
1624 if (IS_ERR(skb))
1625 err = PTR_ERR(skb);
1626 else
1627 err = l2cap_do_send(sk, skb);
1628 goto done;
1629 }
1630
1631 switch (pi->mode) {
1632 case L2CAP_MODE_BASIC:
1633 /* Check outgoing MTU */
1634 if (len > pi->omtu) {
1635 err = -EINVAL;
1636 goto done;
1637 }
1638
1639 /* Create a basic PDU */
1640 skb = l2cap_create_basic_pdu(sk, msg, len);
1641 if (IS_ERR(skb)) {
1642 err = PTR_ERR(skb);
1643 goto done;
1644 }
1645
1646 err = l2cap_do_send(sk, skb);
1647 if (!err)
1648 err = len;
1649 break;
1650
1651 case L2CAP_MODE_ERTM:
1652 case L2CAP_MODE_STREAMING:
1653 /* Entire SDU fits into one PDU */
1654 if (len <= pi->max_pdu_size) {
1655 control = L2CAP_SDU_UNSEGMENTED;
1656 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1657 if (IS_ERR(skb)) {
1658 err = PTR_ERR(skb);
1659 goto done;
1660 }
1661 __skb_queue_tail(TX_QUEUE(sk), skb);
1662 if (sk->sk_send_head == NULL)
1663 sk->sk_send_head = skb;
1664 } else {
1665			/* Segment SDU into multiple PDUs */
1666 err = l2cap_sar_segment_sdu(sk, msg, len);
1667 if (err < 0)
1668 goto done;
1669 }
1670
1671 if (pi->mode == L2CAP_MODE_STREAMING)
1672 err = l2cap_streaming_send(sk);
1673 else
1674 err = l2cap_ertm_send(sk);
1675
1676 if (!err)
1677 err = len;
1678 break;
1679
1680 default:
1681 BT_DBG("bad state %1.1x", pi->mode);
1682 err = -EINVAL;
1683 }
1684
1685done:
1686 release_sock(sk);
1687 return err;
1688}
1689
1690static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1691{
1692 struct sock *sk = sock->sk;
1693
1694 lock_sock(sk);
1695
1696 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1697 struct l2cap_conn_rsp rsp;
1698
1699 sk->sk_state = BT_CONFIG;
1700
1701 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1702 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1703 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1704 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1705 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1706 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1707
1708 release_sock(sk);
1709 return 0;
1710 }
1711
1712 release_sock(sk);
1713
1714 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1715}
1716
1717static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1718{
1719 struct sock *sk = sock->sk;
1720 struct l2cap_options opts;
1721 int len, err = 0;
1722 u32 opt;
1723
1724 BT_DBG("sk %p", sk);
1725
1726 lock_sock(sk);
1727
1728 switch (optname) {
1729 case L2CAP_OPTIONS:
1730 opts.imtu = l2cap_pi(sk)->imtu;
1731 opts.omtu = l2cap_pi(sk)->omtu;
1732 opts.flush_to = l2cap_pi(sk)->flush_to;
1733 opts.mode = l2cap_pi(sk)->mode;
1734 opts.fcs = l2cap_pi(sk)->fcs;
1735
1736 len = min_t(unsigned int, sizeof(opts), optlen);
1737 if (copy_from_user((char *) &opts, optval, len)) {
1738 err = -EFAULT;
1739 break;
1740 }
1741
1742 l2cap_pi(sk)->imtu = opts.imtu;
1743 l2cap_pi(sk)->omtu = opts.omtu;
1744 l2cap_pi(sk)->mode = opts.mode;
1745 l2cap_pi(sk)->fcs = opts.fcs;
1746 break;
1747
1748 case L2CAP_LM:
1749 if (get_user(opt, (u32 __user *) optval)) {
1750 err = -EFAULT;
1751 break;
1752 }
1753
1754 if (opt & L2CAP_LM_AUTH)
1755 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1756 if (opt & L2CAP_LM_ENCRYPT)
1757 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1758 if (opt & L2CAP_LM_SECURE)
1759 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1760
1761 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1762 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1763 break;
1764
1765 default:
1766 err = -ENOPROTOOPT;
1767 break;
1768 }
1769
1770 release_sock(sk);
1771 return err;
1772}
1773
1774static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1775{
1776 struct sock *sk = sock->sk;
1777 struct bt_security sec;
1778 int len, err = 0;
1779 u32 opt;
1780
1781 BT_DBG("sk %p", sk);
1782
1783 if (level == SOL_L2CAP)
1784 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1785
1786 if (level != SOL_BLUETOOTH)
1787 return -ENOPROTOOPT;
1788
1789 lock_sock(sk);
1790
1791 switch (optname) {
1792 case BT_SECURITY:
1793 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1794 err = -EINVAL;
1795 break;
1796 }
1797
1798 sec.level = BT_SECURITY_LOW;
1799
1800 len = min_t(unsigned int, sizeof(sec), optlen);
1801 if (copy_from_user((char *) &sec, optval, len)) {
1802 err = -EFAULT;
1803 break;
1804 }
1805
1806 if (sec.level < BT_SECURITY_LOW ||
1807 sec.level > BT_SECURITY_HIGH) {
1808 err = -EINVAL;
1809 break;
1810 }
1811
1812 l2cap_pi(sk)->sec_level = sec.level;
1813 break;
1814
1815 case BT_DEFER_SETUP:
1816 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1817 err = -EINVAL;
1818 break;
1819 }
1820
1821 if (get_user(opt, (u32 __user *) optval)) {
1822 err = -EFAULT;
1823 break;
1824 }
1825
1826 bt_sk(sk)->defer_setup = opt;
1827 break;
1828
1829 default:
1830 err = -ENOPROTOOPT;
1831 break;
1832 }
1833
1834 release_sock(sk);
1835 return err;
1836}
1837
1838static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1839{
1840 struct sock *sk = sock->sk;
1841 struct l2cap_options opts;
1842 struct l2cap_conninfo cinfo;
1843 int len, err = 0;
1844 u32 opt;
1845
1846 BT_DBG("sk %p", sk);
1847
1848 if (get_user(len, optlen))
1849 return -EFAULT;
1850
1851 lock_sock(sk);
1852
1853 switch (optname) {
1854 case L2CAP_OPTIONS:
1855 opts.imtu = l2cap_pi(sk)->imtu;
1856 opts.omtu = l2cap_pi(sk)->omtu;
1857 opts.flush_to = l2cap_pi(sk)->flush_to;
1858 opts.mode = l2cap_pi(sk)->mode;
1859 opts.fcs = l2cap_pi(sk)->fcs;
1860
1861 len = min_t(unsigned int, len, sizeof(opts));
1862 if (copy_to_user(optval, (char *) &opts, len))
1863 err = -EFAULT;
1864
1865 break;
1866
1867 case L2CAP_LM:
1868 switch (l2cap_pi(sk)->sec_level) {
1869 case BT_SECURITY_LOW:
1870 opt = L2CAP_LM_AUTH;
1871 break;
1872 case BT_SECURITY_MEDIUM:
1873 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1874 break;
1875 case BT_SECURITY_HIGH:
1876 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1877 L2CAP_LM_SECURE;
1878 break;
1879 default:
1880 opt = 0;
1881 break;
1882 }
1883
1884 if (l2cap_pi(sk)->role_switch)
1885 opt |= L2CAP_LM_MASTER;
1886
1887 if (l2cap_pi(sk)->force_reliable)
1888 opt |= L2CAP_LM_RELIABLE;
1889
1890 if (put_user(opt, (u32 __user *) optval))
1891 err = -EFAULT;
1892 break;
1893
1894 case L2CAP_CONNINFO:
1895 if (sk->sk_state != BT_CONNECTED &&
1896 !(sk->sk_state == BT_CONNECT2 &&
1897 bt_sk(sk)->defer_setup)) {
1898 err = -ENOTCONN;
1899 break;
1900 }
1901
1902 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1903 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1904
1905 len = min_t(unsigned int, len, sizeof(cinfo));
1906 if (copy_to_user(optval, (char *) &cinfo, len))
1907 err = -EFAULT;
1908
1909 break;
1910
1911 default:
1912 err = -ENOPROTOOPT;
1913 break;
1914 }
1915
1916 release_sock(sk);
1917 return err;
1918}
1919
1920static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1921{
1922 struct sock *sk = sock->sk;
1923 struct bt_security sec;
1924 int len, err = 0;
1925
1926 BT_DBG("sk %p", sk);
1927
1928 if (level == SOL_L2CAP)
1929 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1930
1931 if (level != SOL_BLUETOOTH)
1932 return -ENOPROTOOPT;
1933
1934 if (get_user(len, optlen))
1935 return -EFAULT;
1936
1937 lock_sock(sk);
1938
1939 switch (optname) {
1940 case BT_SECURITY:
1941 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1942 err = -EINVAL;
1943 break;
1944 }
1945
1946 sec.level = l2cap_pi(sk)->sec_level;
1947
1948 len = min_t(unsigned int, len, sizeof(sec));
1949 if (copy_to_user(optval, (char *) &sec, len))
1950 err = -EFAULT;
1951
1952 break;
1953
1954 case BT_DEFER_SETUP:
1955 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1956 err = -EINVAL;
1957 break;
1958 }
1959
1960 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1961 err = -EFAULT;
1962
1963 break;
1964
1965 default:
1966 err = -ENOPROTOOPT;
1967 break;
1968 }
1969
1970 release_sock(sk);
1971 return err;
1972}
1973
1974static int l2cap_sock_shutdown(struct socket *sock, int how)
1975{
1976 struct sock *sk = sock->sk;
1977 int err = 0;
1978
1979 BT_DBG("sock %p, sk %p", sock, sk);
1980
1981 if (!sk)
1982 return 0;
1983
1984 lock_sock(sk);
1985 if (!sk->sk_shutdown) {
1986 sk->sk_shutdown = SHUTDOWN_MASK;
1987 l2cap_sock_clear_timer(sk);
1988 __l2cap_sock_close(sk, 0);
1989
1990 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1991 err = bt_sock_wait_state(sk, BT_CLOSED,
1992 sk->sk_lingertime);
1993 }
1994 release_sock(sk);
1995 return err;
1996}
1997
1998static int l2cap_sock_release(struct socket *sock)
1999{
2000 struct sock *sk = sock->sk;
2001 int err;
2002
2003 BT_DBG("sock %p, sk %p", sock, sk);
2004
2005 if (!sk)
2006 return 0;
2007
2008 err = l2cap_sock_shutdown(sock, 2);
2009
2010 sock_orphan(sk);
2011 l2cap_sock_kill(sk);
2012 return err;
2013}
2014
2015static void l2cap_chan_ready(struct sock *sk)
2016{
2017 struct sock *parent = bt_sk(sk)->parent;
2018
2019 BT_DBG("sk %p, parent %p", sk, parent);
2020
2021 l2cap_pi(sk)->conf_state = 0;
2022 l2cap_sock_clear_timer(sk);
2023
2024 if (!parent) {
2025 /* Outgoing channel.
2026 * Wake up socket sleeping on connect.
2027 */
2028 sk->sk_state = BT_CONNECTED;
2029 sk->sk_state_change(sk);
2030 } else {
2031 /* Incoming channel.
2032 * Wake up socket sleeping on accept.
2033 */
2034 parent->sk_data_ready(parent, 0);
2035 }
2036}
2037
2038/* Copy frame to all raw sockets on that connection */
2039static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2040{
2041 struct l2cap_chan_list *l = &conn->chan_list;
2042 struct sk_buff *nskb;
2043 struct sock *sk;
2044
2045 BT_DBG("conn %p", conn);
2046
2047 read_lock(&l->lock);
2048 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2049 if (sk->sk_type != SOCK_RAW)
2050 continue;
2051
2052 /* Don't send frame to the socket it came from */
2053 if (skb->sk == sk)
2054 continue;
2055 nskb = skb_clone(skb, GFP_ATOMIC);
2056 if (!nskb)
2057 continue;
2058
2059 if (sock_queue_rcv_skb(sk, nskb))
2060 kfree_skb(nskb);
2061 }
2062 read_unlock(&l->lock);
2063}
2064
2065/* ---- L2CAP signalling commands ---- */
2066static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2067 u8 code, u8 ident, u16 dlen, void *data)
2068{
2069 struct sk_buff *skb, **frag;
2070 struct l2cap_cmd_hdr *cmd;
2071 struct l2cap_hdr *lh;
2072 int len, count;
2073
2074 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2075 conn, code, ident, dlen);
2076
2077 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2078 count = min_t(unsigned int, conn->mtu, len);
2079
2080 skb = bt_skb_alloc(count, GFP_ATOMIC);
2081 if (!skb)
2082 return NULL;
2083
2084 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2085 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2086 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2087
2088 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2089 cmd->code = code;
2090 cmd->ident = ident;
2091 cmd->len = cpu_to_le16(dlen);
2092
2093 if (dlen) {
2094 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2095 memcpy(skb_put(skb, count), data, count);
2096 data += count;
2097 }
2098
2099 len -= skb->len;
2100
2101 /* Continuation fragments (no L2CAP header) */
2102 frag = &skb_shinfo(skb)->frag_list;
2103 while (len) {
2104 count = min_t(unsigned int, conn->mtu, len);
2105
2106 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2107 if (!*frag)
2108 goto fail;
2109
2110 memcpy(skb_put(*frag, count), data, count);
2111
2112 len -= count;
2113 data += count;
2114
2115 frag = &(*frag)->next;
2116 }
2117
2118 return skb;
2119
2120fail:
2121 kfree_skb(skb);
2122 return NULL;
2123}
2124
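/* Configuration options are encoded as type/length/value triplets.
 * l2cap_get_conf_opt() decodes the option at *ptr (1, 2 and 4 byte values
 * are converted from little endian, anything else is returned as a pointer)
 * and advances *ptr; l2cap_add_conf_opt() below does the reverse when
 * building a request or response. */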
2125static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2126{
2127 struct l2cap_conf_opt *opt = *ptr;
2128 int len;
2129
2130 len = L2CAP_CONF_OPT_SIZE + opt->len;
2131 *ptr += len;
2132
2133 *type = opt->type;
2134 *olen = opt->len;
2135
2136 switch (opt->len) {
2137 case 1:
2138 *val = *((u8 *) opt->val);
2139 break;
2140
2141 case 2:
2142 *val = __le16_to_cpu(*((__le16 *) opt->val));
2143 break;
2144
2145 case 4:
2146 *val = __le32_to_cpu(*((__le32 *) opt->val));
2147 break;
2148
2149 default:
2150 *val = (unsigned long) opt->val;
2151 break;
2152 }
2153
2154 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2155 return len;
2156}
2157
2158static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2159{
2160 struct l2cap_conf_opt *opt = *ptr;
2161
2162 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2163
2164 opt->type = type;
2165 opt->len = len;
2166
2167 switch (len) {
2168 case 1:
2169 *((u8 *) opt->val) = val;
2170 break;
2171
2172 case 2:
2173 *((__le16 *) opt->val) = cpu_to_le16(val);
2174 break;
2175
2176 case 4:
2177 *((__le32 *) opt->val) = cpu_to_le32(val);
2178 break;
2179
2180 default:
2181 memcpy(opt->val, (void *) val, len);
2182 break;
2183 }
2184
2185 *ptr += L2CAP_CONF_OPT_SIZE + len;
2186}
2187
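/* Reset the ERTM state machine for a freshly configured channel: clear the
 * sequence counters, set up the retransmission and monitor timers and
 * initialise the SREJ queue. */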
2188static inline void l2cap_ertm_init(struct sock *sk)
2189{
2190 l2cap_pi(sk)->expected_ack_seq = 0;
2191 l2cap_pi(sk)->unacked_frames = 0;
2192 l2cap_pi(sk)->buffer_seq = 0;
2193 l2cap_pi(sk)->num_to_ack = 0;
2194
2195 setup_timer(&l2cap_pi(sk)->retrans_timer,
2196 l2cap_retrans_timeout, (unsigned long) sk);
2197 setup_timer(&l2cap_pi(sk)->monitor_timer,
2198 l2cap_monitor_timeout, (unsigned long) sk);
2199
2200 __skb_queue_head_init(SREJ_QUEUE(sk));
2201}
2202
2203static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2204{
2205 u32 local_feat_mask = l2cap_feat_mask;
2206 if (enable_ertm)
2207 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2208
2209 switch (mode) {
2210 case L2CAP_MODE_ERTM:
2211 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2212 case L2CAP_MODE_STREAMING:
2213 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2214 default:
2215 return 0x00;
2216 }
2217}
2218
2219static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2220{
2221 switch (mode) {
2222 case L2CAP_MODE_STREAMING:
2223 case L2CAP_MODE_ERTM:
2224 if (l2cap_mode_supported(mode, remote_feat_mask))
2225 return mode;
2226 /* fall through */
2227 default:
2228 return L2CAP_MODE_BASIC;
2229 }
2230}
2231
2232static int l2cap_build_conf_req(struct sock *sk, void *data)
2233{
2234 struct l2cap_pinfo *pi = l2cap_pi(sk);
2235 struct l2cap_conf_req *req = data;
2236 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2237 void *ptr = req->data;
2238
2239 BT_DBG("sk %p", sk);
2240
2241 if (pi->num_conf_req || pi->num_conf_rsp)
2242 goto done;
2243
2244 switch (pi->mode) {
2245 case L2CAP_MODE_STREAMING:
2246 case L2CAP_MODE_ERTM:
2247 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2248 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2249 l2cap_send_disconn_req(pi->conn, sk);
2250 break;
2251 default:
2252 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2253 break;
2254 }
2255
2256done:
2257 switch (pi->mode) {
2258 case L2CAP_MODE_BASIC:
2259 if (pi->imtu != L2CAP_DEFAULT_MTU)
2260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2261 break;
2262
2263 case L2CAP_MODE_ERTM:
2264 rfc.mode = L2CAP_MODE_ERTM;
2265 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2266 rfc.max_transmit = max_transmit;
2267 rfc.retrans_timeout = 0;
2268 rfc.monitor_timeout = 0;
2269 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2270
2271 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2272 sizeof(rfc), (unsigned long) &rfc);
2273
2274 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2275 break;
2276
2277 if (pi->fcs == L2CAP_FCS_NONE ||
2278 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2279 pi->fcs = L2CAP_FCS_NONE;
2280 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2281 }
2282 break;
2283
2284 case L2CAP_MODE_STREAMING:
2285 rfc.mode = L2CAP_MODE_STREAMING;
2286 rfc.txwin_size = 0;
2287 rfc.max_transmit = 0;
2288 rfc.retrans_timeout = 0;
2289 rfc.monitor_timeout = 0;
2290 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2291
2292 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2293 sizeof(rfc), (unsigned long) &rfc);
2294
2295 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2296 break;
2297
2298 if (pi->fcs == L2CAP_FCS_NONE ||
2299 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2300 pi->fcs = L2CAP_FCS_NONE;
2301 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2302 }
2303 break;
2304 }
2305
2306 /* FIXME: Need actual value of the flush timeout */
2307 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2308 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2309
2310 req->dcid = cpu_to_le16(pi->dcid);
2311 req->flags = cpu_to_le16(0);
2312
2313 return ptr - data;
2314}
2315
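/* Parse the buffered Configuration Request from the peer (pi->conf_req)
 * and build our Configuration Response into 'data'.  MTU, flush timeout,
 * RFC and FCS options are evaluated, unknown non-hint options are listed
 * back with L2CAP_CONF_UNKNOWN, and a mode this channel cannot accept
 * results in L2CAP_CONF_UNACCEPT or -ECONNREFUSED. */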
2316static int l2cap_parse_conf_req(struct sock *sk, void *data)
2317{
2318 struct l2cap_pinfo *pi = l2cap_pi(sk);
2319 struct l2cap_conf_rsp *rsp = data;
2320 void *ptr = rsp->data;
2321 void *req = pi->conf_req;
2322 int len = pi->conf_len;
2323 int type, hint, olen;
2324 unsigned long val;
2325 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2326 u16 mtu = L2CAP_DEFAULT_MTU;
2327 u16 result = L2CAP_CONF_SUCCESS;
2328
2329 BT_DBG("sk %p", sk);
2330
2331 while (len >= L2CAP_CONF_OPT_SIZE) {
2332 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2333
2334 hint = type & L2CAP_CONF_HINT;
2335 type &= L2CAP_CONF_MASK;
2336
2337 switch (type) {
2338 case L2CAP_CONF_MTU:
2339 mtu = val;
2340 break;
2341
2342 case L2CAP_CONF_FLUSH_TO:
2343 pi->flush_to = val;
2344 break;
2345
2346 case L2CAP_CONF_QOS:
2347 break;
2348
2349 case L2CAP_CONF_RFC:
2350 if (olen == sizeof(rfc))
2351 memcpy(&rfc, (void *) val, olen);
2352 break;
2353
2354 case L2CAP_CONF_FCS:
2355 if (val == L2CAP_FCS_NONE)
2356 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2357
2358 break;
2359
2360 default:
2361 if (hint)
2362 break;
2363
2364 result = L2CAP_CONF_UNKNOWN;
2365 *((u8 *) ptr++) = type;
2366 break;
2367 }
2368 }
2369
2370 if (pi->num_conf_rsp || pi->num_conf_req)
2371 goto done;
2372
2373 switch (pi->mode) {
2374 case L2CAP_MODE_STREAMING:
2375 case L2CAP_MODE_ERTM:
2376 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2377 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2378 return -ECONNREFUSED;
2379 break;
2380 default:
2381 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2382 break;
2383 }
2384
2385done:
2386 if (pi->mode != rfc.mode) {
2387 result = L2CAP_CONF_UNACCEPT;
2388 rfc.mode = pi->mode;
2389
2390 if (pi->num_conf_rsp == 1)
2391 return -ECONNREFUSED;
2392
2393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2394 sizeof(rfc), (unsigned long) &rfc);
2395 }
2396
2397
2398 if (result == L2CAP_CONF_SUCCESS) {
2399 /* Configure output options and let the other side know
2400 * which ones we don't like. */
2401
2402 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2403 result = L2CAP_CONF_UNACCEPT;
2404 else {
2405 pi->omtu = mtu;
2406 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2407 }
2408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2409
2410 switch (rfc.mode) {
2411 case L2CAP_MODE_BASIC:
2412 pi->fcs = L2CAP_FCS_NONE;
2413 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2414 break;
2415
2416 case L2CAP_MODE_ERTM:
2417 pi->remote_tx_win = rfc.txwin_size;
2418 pi->remote_max_tx = rfc.max_transmit;
2419 pi->max_pdu_size = rfc.max_pdu_size;
2420
2421 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2422 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2423
2424 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2425
2426 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2427 sizeof(rfc), (unsigned long) &rfc);
2428
2429 break;
2430
2431 case L2CAP_MODE_STREAMING:
2432 pi->remote_tx_win = rfc.txwin_size;
2433 pi->max_pdu_size = rfc.max_pdu_size;
2434
2435 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2436
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2438 sizeof(rfc), (unsigned long) &rfc);
2439
2440 break;
2441
2442 default:
2443 result = L2CAP_CONF_UNACCEPT;
2444
2445 memset(&rfc, 0, sizeof(rfc));
2446 rfc.mode = pi->mode;
2447 }
2448
2449 if (result == L2CAP_CONF_SUCCESS)
2450 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2451 }
2452 rsp->scid = cpu_to_le16(pi->dcid);
2453 rsp->result = cpu_to_le16(result);
2454 rsp->flags = cpu_to_le16(0x0000);
2455
2456 return ptr - data;
2457}
2458
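/* Parse a Configuration Response that adjusted our proposal and build the
 * follow-up Configuration Request into 'data'.  The MTU, flush timeout and
 * RFC values suggested by the peer are adopted, *result is downgraded if
 * the MTU is below the minimum, and a mode change is refused with
 * -ECONNREFUSED once this side has committed to a mode. */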
2459static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2460{
2461 struct l2cap_pinfo *pi = l2cap_pi(sk);
2462 struct l2cap_conf_req *req = data;
2463 void *ptr = req->data;
2464 int type, olen;
2465 unsigned long val;
2466 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2467
2468 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2469
2470 while (len >= L2CAP_CONF_OPT_SIZE) {
2471 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2472
2473 switch (type) {
2474 case L2CAP_CONF_MTU:
2475 if (val < L2CAP_DEFAULT_MIN_MTU) {
2476 *result = L2CAP_CONF_UNACCEPT;
2477 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2478 } else
2479 pi->omtu = val;
2480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2481 break;
2482
2483 case L2CAP_CONF_FLUSH_TO:
2484 pi->flush_to = val;
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2486 2, pi->flush_to);
2487 break;
2488
2489 case L2CAP_CONF_RFC:
2490 if (olen == sizeof(rfc))
2491 memcpy(&rfc, (void *)val, olen);
2492
2493 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2494 rfc.mode != pi->mode)
2495 return -ECONNREFUSED;
2496
2497 pi->mode = rfc.mode;
2498 pi->fcs = 0;
2499
2500 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2501 sizeof(rfc), (unsigned long) &rfc);
2502 break;
2503 }
2504 }
2505
2506 if (*result == L2CAP_CONF_SUCCESS) {
2507 switch (rfc.mode) {
2508 case L2CAP_MODE_ERTM:
2509 pi->remote_tx_win = rfc.txwin_size;
2510 pi->retrans_timeout = rfc.retrans_timeout;
2511 pi->monitor_timeout = rfc.monitor_timeout;
2512 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2513 break;
2514 case L2CAP_MODE_STREAMING:
2515 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2516 break;
2517 }
2518 }
2519
2520 req->dcid = cpu_to_le16(pi->dcid);
2521 req->flags = cpu_to_le16(0x0000);
2522
2523 return ptr - data;
2524}
2525
2526static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2527{
2528 struct l2cap_conf_rsp *rsp = data;
2529 void *ptr = rsp->data;
2530
2531 BT_DBG("sk %p", sk);
2532
2533 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2534 rsp->result = cpu_to_le16(result);
2535 rsp->flags = cpu_to_le16(flags);
2536
2537 return ptr - data;
2538}
2539
2540static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2541{
2542 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2543
2544 if (rej->reason != 0x0000)
2545 return 0;
2546
2547 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2548 cmd->ident == conn->info_ident) {
2549 del_timer(&conn->info_timer);
2550
2551 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2552 conn->info_ident = 0;
2553
2554 l2cap_conn_start(conn);
2555 }
2556
2557 return 0;
2558}
2559
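/* Handle an incoming Connection Request: look up a listening socket for
 * the requested PSM, check link security and the accept backlog, allocate
 * and initialise a child socket for the new channel, and reply with a
 * Connection Response carrying a success, pending or rejection result. */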
2560static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2561{
2562 struct l2cap_chan_list *list = &conn->chan_list;
2563 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2564 struct l2cap_conn_rsp rsp;
2565 struct sock *sk, *parent;
2566 int result, status = L2CAP_CS_NO_INFO;
2567
2568 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2569 __le16 psm = req->psm;
2570
2571 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2572
2573 	/* Check if we have a socket listening on this psm */
2574 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2575 if (!parent) {
2576 result = L2CAP_CR_BAD_PSM;
2577 goto sendresp;
2578 }
2579
2580 /* Check if the ACL is secure enough (if not SDP) */
2581 if (psm != cpu_to_le16(0x0001) &&
2582 !hci_conn_check_link_mode(conn->hcon)) {
2583 conn->disc_reason = 0x05;
2584 result = L2CAP_CR_SEC_BLOCK;
2585 goto response;
2586 }
2587
2588 result = L2CAP_CR_NO_MEM;
2589
2590 /* Check for backlog size */
2591 if (sk_acceptq_is_full(parent)) {
2592 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2593 goto response;
2594 }
2595
2596 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2597 if (!sk)
2598 goto response;
2599
2600 write_lock_bh(&list->lock);
2601
2602 	/* Check if we already have a channel with that dcid */
2603 if (__l2cap_get_chan_by_dcid(list, scid)) {
2604 write_unlock_bh(&list->lock);
2605 sock_set_flag(sk, SOCK_ZAPPED);
2606 l2cap_sock_kill(sk);
2607 goto response;
2608 }
2609
2610 hci_conn_hold(conn->hcon);
2611
2612 l2cap_sock_init(sk, parent);
2613 bacpy(&bt_sk(sk)->src, conn->src);
2614 bacpy(&bt_sk(sk)->dst, conn->dst);
2615 l2cap_pi(sk)->psm = psm;
2616 l2cap_pi(sk)->dcid = scid;
2617
2618 __l2cap_chan_add(conn, sk, parent);
2619 dcid = l2cap_pi(sk)->scid;
2620
2621 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2622
2623 l2cap_pi(sk)->ident = cmd->ident;
2624
2625 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2626 if (l2cap_check_security(sk)) {
2627 if (bt_sk(sk)->defer_setup) {
2628 sk->sk_state = BT_CONNECT2;
2629 result = L2CAP_CR_PEND;
2630 status = L2CAP_CS_AUTHOR_PEND;
2631 parent->sk_data_ready(parent, 0);
2632 } else {
2633 sk->sk_state = BT_CONFIG;
2634 result = L2CAP_CR_SUCCESS;
2635 status = L2CAP_CS_NO_INFO;
2636 }
2637 } else {
2638 sk->sk_state = BT_CONNECT2;
2639 result = L2CAP_CR_PEND;
2640 status = L2CAP_CS_AUTHEN_PEND;
2641 }
2642 } else {
2643 sk->sk_state = BT_CONNECT2;
2644 result = L2CAP_CR_PEND;
2645 status = L2CAP_CS_NO_INFO;
2646 }
2647
2648 write_unlock_bh(&list->lock);
2649
2650response:
2651 bh_unlock_sock(parent);
2652
2653sendresp:
2654 rsp.scid = cpu_to_le16(scid);
2655 rsp.dcid = cpu_to_le16(dcid);
2656 rsp.result = cpu_to_le16(result);
2657 rsp.status = cpu_to_le16(status);
2658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2659
2660 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2661 struct l2cap_info_req info;
2662 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2663
2664 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2665 conn->info_ident = l2cap_get_ident(conn);
2666
2667 mod_timer(&conn->info_timer, jiffies +
2668 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2669
2670 l2cap_send_cmd(conn, conn->info_ident,
2671 L2CAP_INFO_REQ, sizeof(info), &info);
2672 }
2673
2674 return 0;
2675}
2676
2677static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2678{
2679 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2680 u16 scid, dcid, result, status;
2681 struct sock *sk;
2682 u8 req[128];
2683
2684 scid = __le16_to_cpu(rsp->scid);
2685 dcid = __le16_to_cpu(rsp->dcid);
2686 result = __le16_to_cpu(rsp->result);
2687 status = __le16_to_cpu(rsp->status);
2688
2689 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2690
2691 if (scid) {
2692 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2693 if (!sk)
2694 return 0;
2695 } else {
2696 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2697 if (!sk)
2698 return 0;
2699 }
2700
2701 switch (result) {
2702 case L2CAP_CR_SUCCESS:
2703 sk->sk_state = BT_CONFIG;
2704 l2cap_pi(sk)->ident = 0;
2705 l2cap_pi(sk)->dcid = dcid;
2706 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2707
2708 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2709
2710 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2711 l2cap_build_conf_req(sk, req), req);
2712 l2cap_pi(sk)->num_conf_req++;
2713 break;
2714
2715 case L2CAP_CR_PEND:
2716 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2717 break;
2718
2719 default:
2720 l2cap_chan_del(sk, ECONNREFUSED);
2721 break;
2722 }
2723
2724 bh_unlock_sock(sk);
2725 return 0;
2726}
2727
2728static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2729{
2730 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2731 u16 dcid, flags;
2732 u8 rsp[64];
2733 struct sock *sk;
2734 int len;
2735
2736 dcid = __le16_to_cpu(req->dcid);
2737 flags = __le16_to_cpu(req->flags);
2738
2739 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2740
2741 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2742 if (!sk)
2743 return -ENOENT;
2744
2745 if (sk->sk_state == BT_DISCONN)
2746 goto unlock;
2747
2748 /* Reject if config buffer is too small. */
2749 len = cmd_len - sizeof(*req);
2750 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2751 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2752 l2cap_build_conf_rsp(sk, rsp,
2753 L2CAP_CONF_REJECT, flags), rsp);
2754 goto unlock;
2755 }
2756
2757 /* Store config. */
2758 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2759 l2cap_pi(sk)->conf_len += len;
2760
2761 if (flags & 0x0001) {
2762 /* Incomplete config. Send empty response. */
2763 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2764 l2cap_build_conf_rsp(sk, rsp,
2765 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2766 goto unlock;
2767 }
2768
2769 /* Complete config. */
2770 len = l2cap_parse_conf_req(sk, rsp);
2771 if (len < 0) {
2772 l2cap_send_disconn_req(conn, sk);
2773 goto unlock;
2774 }
2775
2776 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2777 l2cap_pi(sk)->num_conf_rsp++;
2778
2779 /* Reset config buffer. */
2780 l2cap_pi(sk)->conf_len = 0;
2781
2782 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2783 goto unlock;
2784
2785 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2786 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2787 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2788 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2789
2790 sk->sk_state = BT_CONNECTED;
2791
2792 l2cap_pi(sk)->next_tx_seq = 0;
2793 l2cap_pi(sk)->expected_tx_seq = 0;
2794 __skb_queue_head_init(TX_QUEUE(sk));
2795 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2796 l2cap_ertm_init(sk);
2797
2798 l2cap_chan_ready(sk);
2799 goto unlock;
2800 }
2801
2802 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2803 u8 buf[64];
2804 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2805 l2cap_build_conf_req(sk, buf), buf);
2806 l2cap_pi(sk)->num_conf_req++;
2807 }
2808
2809unlock:
2810 bh_unlock_sock(sk);
2811 return 0;
2812}
2813
2814static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2815{
2816 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2817 u16 scid, flags, result;
2818 struct sock *sk;
2819
2820 scid = __le16_to_cpu(rsp->scid);
2821 flags = __le16_to_cpu(rsp->flags);
2822 result = __le16_to_cpu(rsp->result);
2823
2824 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2825 scid, flags, result);
2826
2827 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2828 if (!sk)
2829 return 0;
2830
2831 switch (result) {
2832 case L2CAP_CONF_SUCCESS:
2833 break;
2834
2835 case L2CAP_CONF_UNACCEPT:
2836 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2837 			int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2838 char req[64];
2839
2840 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2841 l2cap_send_disconn_req(conn, sk);
2842 goto done;
2843 }
2844
2845 /* throw out any old stored conf requests */
2846 result = L2CAP_CONF_SUCCESS;
2847 len = l2cap_parse_conf_rsp(sk, rsp->data,
2848 len, req, &result);
2849 if (len < 0) {
2850 l2cap_send_disconn_req(conn, sk);
2851 goto done;
2852 }
2853
2854 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2855 L2CAP_CONF_REQ, len, req);
2856 l2cap_pi(sk)->num_conf_req++;
2857 if (result != L2CAP_CONF_SUCCESS)
2858 goto done;
2859 break;
2860 }
2861
2862 default:
2863 sk->sk_state = BT_DISCONN;
2864 sk->sk_err = ECONNRESET;
2865 l2cap_sock_set_timer(sk, HZ * 5);
2866 l2cap_send_disconn_req(conn, sk);
2867 goto done;
2868 }
2869
2870 if (flags & 0x01)
2871 goto done;
2872
2873 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2874
2875 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2876 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2877 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2878 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2879
2880 sk->sk_state = BT_CONNECTED;
2881 l2cap_pi(sk)->next_tx_seq = 0;
2882 l2cap_pi(sk)->expected_tx_seq = 0;
2883 __skb_queue_head_init(TX_QUEUE(sk));
2884 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2885 l2cap_ertm_init(sk);
2886
2887 l2cap_chan_ready(sk);
2888 }
2889
2890done:
2891 bh_unlock_sock(sk);
2892 return 0;
2893}
2894
2895static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2896{
2897 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2898 struct l2cap_disconn_rsp rsp;
2899 u16 dcid, scid;
2900 struct sock *sk;
2901
2902 scid = __le16_to_cpu(req->scid);
2903 dcid = __le16_to_cpu(req->dcid);
2904
2905 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2906
2907 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2908 if (!sk)
2909 return 0;
2910
2911 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2912 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2913 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2914
2915 sk->sk_shutdown = SHUTDOWN_MASK;
2916
2917 skb_queue_purge(TX_QUEUE(sk));
2918
2919 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2920 skb_queue_purge(SREJ_QUEUE(sk));
2921 del_timer(&l2cap_pi(sk)->retrans_timer);
2922 del_timer(&l2cap_pi(sk)->monitor_timer);
2923 }
2924
2925 l2cap_chan_del(sk, ECONNRESET);
2926 bh_unlock_sock(sk);
2927
2928 l2cap_sock_kill(sk);
2929 return 0;
2930}
2931
2932static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2933{
2934 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2935 u16 dcid, scid;
2936 struct sock *sk;
2937
2938 scid = __le16_to_cpu(rsp->scid);
2939 dcid = __le16_to_cpu(rsp->dcid);
2940
2941 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2942
2943 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2944 if (!sk)
2945 return 0;
2946
2947 skb_queue_purge(TX_QUEUE(sk));
2948
2949 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2950 skb_queue_purge(SREJ_QUEUE(sk));
2951 del_timer(&l2cap_pi(sk)->retrans_timer);
2952 del_timer(&l2cap_pi(sk)->monitor_timer);
2953 }
2954
2955 l2cap_chan_del(sk, 0);
2956 bh_unlock_sock(sk);
2957
2958 l2cap_sock_kill(sk);
2959 return 0;
2960}
2961
2962static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2963{
2964 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2965 u16 type;
2966
2967 type = __le16_to_cpu(req->type);
2968
2969 BT_DBG("type 0x%4.4x", type);
2970
2971 if (type == L2CAP_IT_FEAT_MASK) {
2972 u8 buf[8];
2973 u32 feat_mask = l2cap_feat_mask;
2974 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2975 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2976 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2977 if (enable_ertm)
2978 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2979 | L2CAP_FEAT_FCS;
2980 put_unaligned_le32(feat_mask, rsp->data);
2981 l2cap_send_cmd(conn, cmd->ident,
2982 L2CAP_INFO_RSP, sizeof(buf), buf);
2983 } else if (type == L2CAP_IT_FIXED_CHAN) {
2984 u8 buf[12];
2985 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2986 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2987 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2988 memcpy(buf + 4, l2cap_fixed_chan, 8);
2989 l2cap_send_cmd(conn, cmd->ident,
2990 L2CAP_INFO_RSP, sizeof(buf), buf);
2991 } else {
2992 struct l2cap_info_rsp rsp;
2993 rsp.type = cpu_to_le16(type);
2994 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2995 l2cap_send_cmd(conn, cmd->ident,
2996 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2997 }
2998
2999 return 0;
3000}
3001
3002static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3003{
3004 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3005 u16 type, result;
3006
3007 type = __le16_to_cpu(rsp->type);
3008 result = __le16_to_cpu(rsp->result);
3009
3010 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3011
3012 del_timer(&conn->info_timer);
3013
3014 if (type == L2CAP_IT_FEAT_MASK) {
3015 conn->feat_mask = get_unaligned_le32(rsp->data);
3016
3017 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3018 struct l2cap_info_req req;
3019 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3020
3021 conn->info_ident = l2cap_get_ident(conn);
3022
3023 l2cap_send_cmd(conn, conn->info_ident,
3024 L2CAP_INFO_REQ, sizeof(req), &req);
3025 } else {
3026 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3027 conn->info_ident = 0;
3028
3029 l2cap_conn_start(conn);
3030 }
3031 } else if (type == L2CAP_IT_FIXED_CHAN) {
3032 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3033 conn->info_ident = 0;
3034
3035 l2cap_conn_start(conn);
3036 }
3037
3038 return 0;
3039}
3040
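/* Process a C-frame received on the signalling channel: iterate over the
 * concatenated commands it carries, dispatch each one to its handler and
 * send a Command Reject if a handler fails or the command is unknown. */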
3041static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3042{
3043 u8 *data = skb->data;
3044 int len = skb->len;
3045 struct l2cap_cmd_hdr cmd;
3046 int err = 0;
3047
3048 l2cap_raw_recv(conn, skb);
3049
3050 while (len >= L2CAP_CMD_HDR_SIZE) {
3051 u16 cmd_len;
3052 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3053 data += L2CAP_CMD_HDR_SIZE;
3054 len -= L2CAP_CMD_HDR_SIZE;
3055
3056 cmd_len = le16_to_cpu(cmd.len);
3057
3058 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3059
3060 if (cmd_len > len || !cmd.ident) {
3061 BT_DBG("corrupted command");
3062 break;
3063 }
3064
3065 switch (cmd.code) {
3066 case L2CAP_COMMAND_REJ:
3067 l2cap_command_rej(conn, &cmd, data);
3068 break;
3069
3070 case L2CAP_CONN_REQ:
3071 err = l2cap_connect_req(conn, &cmd, data);
3072 break;
3073
3074 case L2CAP_CONN_RSP:
3075 err = l2cap_connect_rsp(conn, &cmd, data);
3076 break;
3077
3078 case L2CAP_CONF_REQ:
3079 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3080 break;
3081
3082 case L2CAP_CONF_RSP:
3083 err = l2cap_config_rsp(conn, &cmd, data);
3084 break;
3085
3086 case L2CAP_DISCONN_REQ:
3087 err = l2cap_disconnect_req(conn, &cmd, data);
3088 break;
3089
3090 case L2CAP_DISCONN_RSP:
3091 err = l2cap_disconnect_rsp(conn, &cmd, data);
3092 break;
3093
3094 case L2CAP_ECHO_REQ:
3095 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3096 break;
3097
3098 case L2CAP_ECHO_RSP:
3099 break;
3100
3101 case L2CAP_INFO_REQ:
3102 err = l2cap_information_req(conn, &cmd, data);
3103 break;
3104
3105 case L2CAP_INFO_RSP:
3106 err = l2cap_information_rsp(conn, &cmd, data);
3107 break;
3108
3109 default:
3110 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3111 err = -EINVAL;
3112 break;
3113 }
3114
3115 if (err) {
3116 struct l2cap_cmd_rej rej;
3117 BT_DBG("error %d", err);
3118
3119 /* FIXME: Map err to a valid reason */
3120 rej.reason = cpu_to_le16(0);
3121 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3122 }
3123
3124 data += cmd_len;
3125 len -= cmd_len;
3126 }
3127
3128 kfree_skb(skb);
3129}
3130
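/* Verify the CRC16 FCS of a received frame when FCS is in use.  The two
 * FCS octets are trimmed from the skb and the checksum is recomputed over
 * the basic L2CAP header, the control field and the remaining payload. */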
3131static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3132{
3133 u16 our_fcs, rcv_fcs;
3134 int hdr_size = L2CAP_HDR_SIZE + 2;
3135
3136 if (pi->fcs == L2CAP_FCS_CRC16) {
3137 skb_trim(skb, skb->len - 2);
3138 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3139 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3140
3141 if (our_fcs != rcv_fcs)
3142 return -EINVAL;
3143 }
3144 return 0;
3145}
3146
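/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by TxSeq so that l2cap_check_srej_gap() can release the frames
 * in order once the missing ones have been retransmitted. */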
3147static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3148{
3149 struct sk_buff *next_skb;
3150
3151 bt_cb(skb)->tx_seq = tx_seq;
3152 bt_cb(skb)->sar = sar;
3153
3154 next_skb = skb_peek(SREJ_QUEUE(sk));
3155 if (!next_skb) {
3156 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3157 return;
3158 }
3159
3160 do {
3161 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3162 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3163 return;
3164 }
3165
3166 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3167 break;
3168
3169 	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3170
3171 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3172}
3173
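/* Reassemble a (possibly segmented) SDU from I-frames according to the SAR
 * bits of the control field.  A start frame carries the total SDU length,
 * continuation and end frames are appended to it, and the completed SDU
 * (or an unsegmented frame) is queued on the socket receive buffer. */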
3174static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3175{
3176 struct l2cap_pinfo *pi = l2cap_pi(sk);
3177 struct sk_buff *_skb;
3178 int err = -EINVAL;
3179
3180 switch (control & L2CAP_CTRL_SAR) {
3181 case L2CAP_SDU_UNSEGMENTED:
3182 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3183 kfree_skb(pi->sdu);
3184 break;
3185 }
3186
3187 err = sock_queue_rcv_skb(sk, skb);
3188 if (!err)
3189 return 0;
3190
3191 break;
3192
3193 case L2CAP_SDU_START:
3194 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3195 kfree_skb(pi->sdu);
3196 break;
3197 }
3198
3199 pi->sdu_len = get_unaligned_le16(skb->data);
3200 skb_pull(skb, 2);
3201
3202 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3203 if (!pi->sdu) {
3204 err = -ENOMEM;
3205 break;
3206 }
3207
3208 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3209
3210 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3211 pi->partial_sdu_len = skb->len;
3212 err = 0;
3213 break;
3214
3215 case L2CAP_SDU_CONTINUE:
3216 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3217 break;
3218
3219 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3220
3221 pi->partial_sdu_len += skb->len;
3222 if (pi->partial_sdu_len > pi->sdu_len)
3223 kfree_skb(pi->sdu);
3224 else
3225 err = 0;
3226
3227 break;
3228
3229 case L2CAP_SDU_END:
3230 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3231 break;
3232
3233 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3234
3235 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3236 pi->partial_sdu_len += skb->len;
3237
3238 if (pi->partial_sdu_len == pi->sdu_len) {
3239 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3240 err = sock_queue_rcv_skb(sk, _skb);
3241 if (err < 0)
3242 kfree_skb(_skb);
3243 }
3244 kfree_skb(pi->sdu);
3245 err = 0;
3246
3247 break;
3248 }
3249
3250 kfree_skb(skb);
3251 return err;
3252}
3253
3254static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3255{
3256 struct sk_buff *skb;
3257 u16 control = 0;
3258
3259 	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3260 if (bt_cb(skb)->tx_seq != tx_seq)
3261 break;
3262
3263 skb = skb_dequeue(SREJ_QUEUE(sk));
3264 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3265 l2cap_sar_reassembly_sdu(sk, skb, control);
3266 l2cap_pi(sk)->buffer_seq_srej =
3267 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3268 tx_seq++;
3269 }
3270}
3271
3272static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3273{
3274 struct l2cap_pinfo *pi = l2cap_pi(sk);
3275 struct srej_list *l, *tmp;
3276 u16 control;
3277
3278 	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3279 if (l->tx_seq == tx_seq) {
3280 list_del(&l->list);
3281 kfree(l);
3282 return;
3283 }
3284 control = L2CAP_SUPER_SELECT_REJECT;
3285 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3286 l2cap_send_sframe(pi, control);
3287 list_del(&l->list);
3288 list_add_tail(&l->list, SREJ_LIST(sk));
3289 }
3290}
3291
3292static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3293{
3294 struct l2cap_pinfo *pi = l2cap_pi(sk);
3295 struct srej_list *new;
3296 u16 control;
3297
3298 while (tx_seq != pi->expected_tx_seq) {
3299 control = L2CAP_SUPER_SELECT_REJECT;
3300 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3301 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3302 control |= L2CAP_CTRL_POLL;
3303 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3304 }
3305 l2cap_send_sframe(pi, control);
3306
3307 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3308 new->tx_seq = pi->expected_tx_seq++;
3309 list_add_tail(&new->list, SREJ_LIST(sk));
3310 }
3311 pi->expected_tx_seq++;
3312}
3313
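/* ERTM receive path for I-frames.  The ReqSeq field acknowledges frames we
 * transmitted; an in-sequence TxSeq is handed to SAR reassembly and
 * acknowledged with an RR after enough frames have accumulated, while an
 * out-of-sequence TxSeq is parked on the SREJ queue and a Select Reject is
 * sent for the missing frames. */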
3314static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3315{
3316 struct l2cap_pinfo *pi = l2cap_pi(sk);
3317 u8 tx_seq = __get_txseq(rx_control);
3318 u8 req_seq = __get_reqseq(rx_control);
3319 u16 tx_control = 0;
3320 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3321 int err = 0;
3322
3323 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3324
3325 pi->expected_ack_seq = req_seq;
3326 l2cap_drop_acked_frames(sk);
3327
3328 if (tx_seq == pi->expected_tx_seq)
3329 goto expected;
3330
3331 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3332 struct srej_list *first;
3333
3334 first = list_first_entry(SREJ_LIST(sk),
3335 struct srej_list, list);
3336 if (tx_seq == first->tx_seq) {
3337 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3338 l2cap_check_srej_gap(sk, tx_seq);
3339
3340 list_del(&first->list);
3341 kfree(first);
3342
3343 if (list_empty(SREJ_LIST(sk))) {
3344 pi->buffer_seq = pi->buffer_seq_srej;
3345 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3346 }
3347 } else {
3348 struct srej_list *l;
3349 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3350
3351 list_for_each_entry(l, SREJ_LIST(sk), list) {
3352 if (l->tx_seq == tx_seq) {
3353 l2cap_resend_srejframe(sk, tx_seq);
3354 return 0;
3355 }
3356 }
3357 l2cap_send_srejframe(sk, tx_seq);
3358 }
3359 } else {
3360 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3361
3362 INIT_LIST_HEAD(SREJ_LIST(sk));
3363 pi->buffer_seq_srej = pi->buffer_seq;
3364
3365 __skb_queue_head_init(SREJ_QUEUE(sk));
3366 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3367
3368 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3369
3370 l2cap_send_srejframe(sk, tx_seq);
3371 }
3372 return 0;
3373
3374expected:
3375 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3376
3377 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3378 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3379 return 0;
3380 }
3381
3382 if (rx_control & L2CAP_CTRL_FINAL) {
3383 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3384 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3385 else {
3386 sk->sk_send_head = TX_QUEUE(sk)->next;
3387 pi->next_tx_seq = pi->expected_ack_seq;
3388 l2cap_ertm_send(sk);
3389 }
3390 }
3391
3392 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3393
3394 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3395 if (err < 0)
3396 return err;
3397
3398 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3399 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3400 tx_control |= L2CAP_SUPER_RCV_READY;
3401 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3402 l2cap_send_sframe(pi, tx_control);
3403 }
3404 return 0;
3405}
3406
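/* ERTM receive path for S-frames (RR, REJ, SREJ, RNR): update the
 * acknowledgement state, retransmit or resume transmission as requested,
 * and honour the poll/final bits used by the retransmission and monitor
 * timers. */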
3407static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3408{
3409 struct l2cap_pinfo *pi = l2cap_pi(sk);
3410 u8 tx_seq = __get_reqseq(rx_control);
3411
3412 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3413
3414 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3415 case L2CAP_SUPER_RCV_READY:
3416 if (rx_control & L2CAP_CTRL_POLL) {
3417 u16 control = L2CAP_CTRL_FINAL;
3418 control |= L2CAP_SUPER_RCV_READY |
3419 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3420 l2cap_send_sframe(l2cap_pi(sk), control);
3421 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3422
3423 } else if (rx_control & L2CAP_CTRL_FINAL) {
3424 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3425 pi->expected_ack_seq = tx_seq;
3426 l2cap_drop_acked_frames(sk);
3427
3428 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3429 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3430 else {
3431 sk->sk_send_head = TX_QUEUE(sk)->next;
3432 pi->next_tx_seq = pi->expected_ack_seq;
3433 l2cap_ertm_send(sk);
3434 }
3435
3436 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3437 break;
3438
3439 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3440 del_timer(&pi->monitor_timer);
3441
3442 if (pi->unacked_frames > 0)
3443 __mod_retrans_timer();
3444 } else {
3445 pi->expected_ack_seq = tx_seq;
3446 l2cap_drop_acked_frames(sk);
3447
3448 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3449 (pi->unacked_frames > 0))
3450 __mod_retrans_timer();
3451
3452 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3453 l2cap_ertm_send(sk);
3454 }
3455 break;
3456
3457 case L2CAP_SUPER_REJECT:
3458 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3459
3460 pi->expected_ack_seq = __get_reqseq(rx_control);
3461 l2cap_drop_acked_frames(sk);
3462
3463 if (rx_control & L2CAP_CTRL_FINAL) {
3464 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3465 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3466 else {
3467 sk->sk_send_head = TX_QUEUE(sk)->next;
3468 pi->next_tx_seq = pi->expected_ack_seq;
3469 l2cap_ertm_send(sk);
3470 }
3471 } else {
3472 sk->sk_send_head = TX_QUEUE(sk)->next;
3473 pi->next_tx_seq = pi->expected_ack_seq;
3474 l2cap_ertm_send(sk);
3475
3476 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3477 pi->srej_save_reqseq = tx_seq;
3478 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3479 }
3480 }
3481
3482 break;
3483
3484 case L2CAP_SUPER_SELECT_REJECT:
3485 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3486
3487 if (rx_control & L2CAP_CTRL_POLL) {
3488 pi->expected_ack_seq = tx_seq;
3489 l2cap_drop_acked_frames(sk);
3490 l2cap_retransmit_frame(sk, tx_seq);
3491 l2cap_ertm_send(sk);
3492 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3493 pi->srej_save_reqseq = tx_seq;
3494 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3495 }
3496 } else if (rx_control & L2CAP_CTRL_FINAL) {
3497 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3498 pi->srej_save_reqseq == tx_seq)
3499 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3500 else
3501 l2cap_retransmit_frame(sk, tx_seq);
3502 }
3503 else {
3504 l2cap_retransmit_frame(sk, tx_seq);
3505 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3506 pi->srej_save_reqseq = tx_seq;
3507 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3508 }
3509 }
3510 break;
3511
3512 case L2CAP_SUPER_RCV_NOT_READY:
3513 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3514 pi->expected_ack_seq = tx_seq;
3515 l2cap_drop_acked_frames(sk);
3516
3517 del_timer(&l2cap_pi(sk)->retrans_timer);
3518 if (rx_control & L2CAP_CTRL_POLL) {
3519 u16 control = L2CAP_CTRL_FINAL;
3520 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
3521 }
3522 break;
3523 }
3524
3525 kfree_skb(skb);
3526 return 0;
3527}
3528
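/* Entry point for data received on a connection-oriented channel: look up
 * the channel by CID and feed the frame to the Basic, ERTM or Streaming
 * mode receive path, dropping frames that exceed the negotiated size or
 * fail the FCS check. */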
3529static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3530{
3531 struct sock *sk;
3532 struct l2cap_pinfo *pi;
3533 u16 control, len;
3534 u8 tx_seq;
3535
3536 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3537 if (!sk) {
3538 BT_DBG("unknown cid 0x%4.4x", cid);
3539 goto drop;
3540 }
3541
3542 pi = l2cap_pi(sk);
3543
3544 BT_DBG("sk %p, len %d", sk, skb->len);
3545
3546 if (sk->sk_state != BT_CONNECTED)
3547 goto drop;
3548
3549 switch (pi->mode) {
3550 case L2CAP_MODE_BASIC:
3551 		/* If the socket receive buffer overflows we drop data here,
3552 		 * which is *bad* because L2CAP has to be reliable.
3553 		 * But we don't have any other choice: Basic mode L2CAP
3554 		 * doesn't provide a flow control mechanism. */
3555
3556 if (pi->imtu < skb->len)
3557 goto drop;
3558
3559 if (!sock_queue_rcv_skb(sk, skb))
3560 goto done;
3561 break;
3562
3563 case L2CAP_MODE_ERTM:
3564 control = get_unaligned_le16(skb->data);
3565 skb_pull(skb, 2);
3566 len = skb->len;
3567
3568 if (__is_sar_start(control))
3569 len -= 2;
3570
3571 if (pi->fcs == L2CAP_FCS_CRC16)
3572 len -= 2;
3573
3574 		/*
3575 		 * We can just drop the corrupted I-frame here: it will be
3576 		 * treated as a missing frame and the ERTM recovery
3577 		 * procedure will request its retransmission.
3578 		 */
3579 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3580 goto drop;
3581
3582 if (l2cap_check_fcs(pi, skb))
3583 goto drop;
3584
3585 if (__is_iframe(control))
3586 l2cap_data_channel_iframe(sk, control, skb);
3587 else
3588 l2cap_data_channel_sframe(sk, control, skb);
3589
3590 goto done;
3591
3592 case L2CAP_MODE_STREAMING:
3593 control = get_unaligned_le16(skb->data);
3594 skb_pull(skb, 2);
3595 len = skb->len;
3596
3597 if (__is_sar_start(control))
3598 len -= 2;
3599
3600 if (pi->fcs == L2CAP_FCS_CRC16)
3601 len -= 2;
3602
3603 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3604 goto drop;
3605
3606 if (l2cap_check_fcs(pi, skb))
3607 goto drop;
3608
3609 tx_seq = __get_txseq(control);
3610
3611 if (pi->expected_tx_seq == tx_seq)
3612 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3613 else
3614 pi->expected_tx_seq = (tx_seq + 1) % 64;
3615
3616 l2cap_sar_reassembly_sdu(sk, skb, control);
3617
3618 goto done;
3619
3620 default:
3621 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3622 break;
3623 }
3624
3625drop:
3626 kfree_skb(skb);
3627
3628done:
3629 if (sk)
3630 bh_unlock_sock(sk);
3631
3632 return 0;
3633}
3634
3635static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3636{
3637 struct sock *sk;
3638
3639 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3640 if (!sk)
3641 goto drop;
3642
3643 BT_DBG("sk %p, len %d", sk, skb->len);
3644
3645 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3646 goto drop;
3647
3648 if (l2cap_pi(sk)->imtu < skb->len)
3649 goto drop;
3650
3651 if (!sock_queue_rcv_skb(sk, skb))
3652 goto done;
3653
3654drop:
3655 kfree_skb(skb);
3656
3657done:
3658 if (sk)
3659 bh_unlock_sock(sk);
3660 return 0;
3661}
3662
3663static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3664{
3665 struct l2cap_hdr *lh = (void *) skb->data;
3666 u16 cid, len;
3667 __le16 psm;
3668
3669 skb_pull(skb, L2CAP_HDR_SIZE);
3670 cid = __le16_to_cpu(lh->cid);
3671 len = __le16_to_cpu(lh->len);
3672
3673 if (len != skb->len) {
3674 kfree_skb(skb);
3675 return;
3676 }
3677
3678 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3679
3680 switch (cid) {
3681 case L2CAP_CID_SIGNALING:
3682 l2cap_sig_channel(conn, skb);
3683 break;
3684
3685 case L2CAP_CID_CONN_LESS:
3686 psm = get_unaligned_le16(skb->data);
3687 skb_pull(skb, 2);
3688 l2cap_conless_channel(conn, psm, skb);
3689 break;
3690
3691 default:
3692 l2cap_data_channel(conn, cid, skb);
3693 break;
3694 }
3695}
3696
3697/* ---- L2CAP interface with lower layer (HCI) ---- */
3698
3699static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3700{
3701 int exact = 0, lm1 = 0, lm2 = 0;
3702 register struct sock *sk;
3703 struct hlist_node *node;
3704
3705 if (type != ACL_LINK)
3706 return 0;
3707
3708 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3709
3710 /* Find listening sockets and check their link_mode */
3711 read_lock(&l2cap_sk_list.lock);
3712 sk_for_each(sk, node, &l2cap_sk_list.head) {
3713 if (sk->sk_state != BT_LISTEN)
3714 continue;
3715
3716 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3717 lm1 |= HCI_LM_ACCEPT;
3718 if (l2cap_pi(sk)->role_switch)
3719 lm1 |= HCI_LM_MASTER;
3720 exact++;
3721 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3722 lm2 |= HCI_LM_ACCEPT;
3723 if (l2cap_pi(sk)->role_switch)
3724 lm2 |= HCI_LM_MASTER;
3725 }
3726 }
3727 read_unlock(&l2cap_sk_list.lock);
3728
3729 return exact ? lm1 : lm2;
3730}
3731
3732static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3733{
3734 struct l2cap_conn *conn;
3735
3736 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3737
3738 if (hcon->type != ACL_LINK)
3739 return 0;
3740
3741 if (!status) {
3742 conn = l2cap_conn_add(hcon, status);
3743 if (conn)
3744 l2cap_conn_ready(conn);
3745 } else
3746 l2cap_conn_del(hcon, bt_err(status));
3747
3748 return 0;
3749}
3750
3751static int l2cap_disconn_ind(struct hci_conn *hcon)
3752{
3753 struct l2cap_conn *conn = hcon->l2cap_data;
3754
3755 BT_DBG("hcon %p", hcon);
3756
3757 if (hcon->type != ACL_LINK || !conn)
3758 return 0x13;
3759
3760 return conn->disc_reason;
3761}
3762
3763static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3764{
3765 BT_DBG("hcon %p reason %d", hcon, reason);
3766
3767 if (hcon->type != ACL_LINK)
3768 return 0;
3769
3770 l2cap_conn_del(hcon, bt_err(reason));
3771
3772 return 0;
3773}
3774
3775static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3776{
3777 if (sk->sk_type != SOCK_SEQPACKET)
3778 return;
3779
3780 if (encrypt == 0x00) {
3781 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3782 l2cap_sock_clear_timer(sk);
3783 l2cap_sock_set_timer(sk, HZ * 5);
3784 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3785 __l2cap_sock_close(sk, ECONNREFUSED);
3786 } else {
3787 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3788 l2cap_sock_clear_timer(sk);
3789 }
3790}
3791
3792static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3793{
3794 struct l2cap_chan_list *l;
3795 struct l2cap_conn *conn = hcon->l2cap_data;
3796 struct sock *sk;
3797
3798 if (!conn)
3799 return 0;
3800
3801 l = &conn->chan_list;
3802
3803 BT_DBG("conn %p", conn);
3804
3805 read_lock(&l->lock);
3806
3807 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3808 bh_lock_sock(sk);
3809
3810 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3811 bh_unlock_sock(sk);
3812 continue;
3813 }
3814
3815 if (!status && (sk->sk_state == BT_CONNECTED ||
3816 sk->sk_state == BT_CONFIG)) {
3817 l2cap_check_encryption(sk, encrypt);
3818 bh_unlock_sock(sk);
3819 continue;
3820 }
3821
3822 if (sk->sk_state == BT_CONNECT) {
3823 if (!status) {
3824 struct l2cap_conn_req req;
3825 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3826 req.psm = l2cap_pi(sk)->psm;
3827
3828 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3829
3830 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3831 L2CAP_CONN_REQ, sizeof(req), &req);
3832 } else {
3833 l2cap_sock_clear_timer(sk);
3834 l2cap_sock_set_timer(sk, HZ / 10);
3835 }
3836 } else if (sk->sk_state == BT_CONNECT2) {
3837 struct l2cap_conn_rsp rsp;
3838 __u16 result;
3839
3840 if (!status) {
3841 sk->sk_state = BT_CONFIG;
3842 result = L2CAP_CR_SUCCESS;
3843 } else {
3844 sk->sk_state = BT_DISCONN;
3845 l2cap_sock_set_timer(sk, HZ / 10);
3846 result = L2CAP_CR_SEC_BLOCK;
3847 }
3848
3849 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3850 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3851 rsp.result = cpu_to_le16(result);
3852 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3853 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3854 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3855 }
3856
3857 bh_unlock_sock(sk);
3858 }
3859
3860 read_unlock(&l->lock);
3861
3862 return 0;
3863}
3864
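/* Reassemble L2CAP frames from incoming ACL data.  A start fragment either
 * carries a complete frame (delivered immediately) or opens a reassembly
 * buffer sized from the L2CAP length field; continuation fragments are
 * appended until the frame is complete and can be dispatched. */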
3865static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3866{
3867 struct l2cap_conn *conn = hcon->l2cap_data;
3868
3869 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3870 goto drop;
3871
3872 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3873
3874 if (flags & ACL_START) {
3875 struct l2cap_hdr *hdr;
3876 int len;
3877
3878 if (conn->rx_len) {
3879 BT_ERR("Unexpected start frame (len %d)", skb->len);
3880 kfree_skb(conn->rx_skb);
3881 conn->rx_skb = NULL;
3882 conn->rx_len = 0;
3883 l2cap_conn_unreliable(conn, ECOMM);
3884 }
3885
3886 if (skb->len < 2) {
3887 BT_ERR("Frame is too short (len %d)", skb->len);
3888 l2cap_conn_unreliable(conn, ECOMM);
3889 goto drop;
3890 }
3891
3892 hdr = (struct l2cap_hdr *) skb->data;
3893 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3894
3895 if (len == skb->len) {
3896 /* Complete frame received */
3897 l2cap_recv_frame(conn, skb);
3898 return 0;
3899 }
3900
3901 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3902
3903 if (skb->len > len) {
3904 BT_ERR("Frame is too long (len %d, expected len %d)",
3905 skb->len, len);
3906 l2cap_conn_unreliable(conn, ECOMM);
3907 goto drop;
3908 }
3909
3910 /* Allocate skb for the complete frame (with header) */
3911 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3912 if (!conn->rx_skb)
3913 goto drop;
3914
3915 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3916 skb->len);
3917 conn->rx_len = len - skb->len;
3918 } else {
3919 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3920
3921 if (!conn->rx_len) {
3922 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3923 l2cap_conn_unreliable(conn, ECOMM);
3924 goto drop;
3925 }
3926
3927 if (skb->len > conn->rx_len) {
3928 BT_ERR("Fragment is too long (len %d, expected %d)",
3929 skb->len, conn->rx_len);
3930 kfree_skb(conn->rx_skb);
3931 conn->rx_skb = NULL;
3932 conn->rx_len = 0;
3933 l2cap_conn_unreliable(conn, ECOMM);
3934 goto drop;
3935 }
3936
3937 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3938 skb->len);
3939 conn->rx_len -= skb->len;
3940
3941 if (!conn->rx_len) {
3942 /* Complete frame received */
3943 l2cap_recv_frame(conn, conn->rx_skb);
3944 conn->rx_skb = NULL;
3945 }
3946 }
3947
3948drop:
3949 kfree_skb(skb);
3950 return 0;
3951}
3952
3953static int l2cap_debugfs_show(struct seq_file *f, void *p)
3954{
3955 struct sock *sk;
3956 struct hlist_node *node;
3957
3958 read_lock_bh(&l2cap_sk_list.lock);
3959
3960 sk_for_each(sk, node, &l2cap_sk_list.head) {
3961 struct l2cap_pinfo *pi = l2cap_pi(sk);
3962
3963 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3964 batostr(&bt_sk(sk)->src),
3965 batostr(&bt_sk(sk)->dst),
3966 sk->sk_state, __le16_to_cpu(pi->psm),
3967 pi->scid, pi->dcid,
3968 pi->imtu, pi->omtu, pi->sec_level);
3969 }
3970
3971 read_unlock_bh(&l2cap_sk_list.lock);
3972
3973 return 0;
3974}
3975
3976static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3977{
3978 return single_open(file, l2cap_debugfs_show, inode->i_private);
3979}
3980
3981static const struct file_operations l2cap_debugfs_fops = {
3982 .open = l2cap_debugfs_open,
3983 .read = seq_read,
3984 .llseek = seq_lseek,
3985 .release = single_release,
3986};
3987
3988static struct dentry *l2cap_debugfs;
3989
3990static const struct proto_ops l2cap_sock_ops = {
3991 .family = PF_BLUETOOTH,
3992 .owner = THIS_MODULE,
3993 .release = l2cap_sock_release,
3994 .bind = l2cap_sock_bind,
3995 .connect = l2cap_sock_connect,
3996 .listen = l2cap_sock_listen,
3997 .accept = l2cap_sock_accept,
3998 .getname = l2cap_sock_getname,
3999 .sendmsg = l2cap_sock_sendmsg,
4000 .recvmsg = l2cap_sock_recvmsg,
4001 .poll = bt_sock_poll,
4002 .ioctl = bt_sock_ioctl,
4003 .mmap = sock_no_mmap,
4004 .socketpair = sock_no_socketpair,
4005 .shutdown = l2cap_sock_shutdown,
4006 .setsockopt = l2cap_sock_setsockopt,
4007 .getsockopt = l2cap_sock_getsockopt
4008};
4009
4010static const struct net_proto_family l2cap_sock_family_ops = {
4011 .family = PF_BLUETOOTH,
4012 .owner = THIS_MODULE,
4013 .create = l2cap_sock_create,
4014};
4015
4016static struct hci_proto l2cap_hci_proto = {
4017 .name = "L2CAP",
4018 .id = HCI_PROTO_L2CAP,
4019 .connect_ind = l2cap_connect_ind,
4020 .connect_cfm = l2cap_connect_cfm,
4021 .disconn_ind = l2cap_disconn_ind,
4022 .disconn_cfm = l2cap_disconn_cfm,
4023 .security_cfm = l2cap_security_cfm,
4024 .recv_acldata = l2cap_recv_acldata
4025};
4026
4027static int __init l2cap_init(void)
4028{
4029 int err;
4030
4031 err = proto_register(&l2cap_proto, 0);
4032 if (err < 0)
4033 return err;
4034
4035 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4036 if (err < 0) {
4037 BT_ERR("L2CAP socket registration failed");
4038 goto error;
4039 }
4040
4041 err = hci_register_proto(&l2cap_hci_proto);
4042 if (err < 0) {
4043 BT_ERR("L2CAP protocol registration failed");
4044 bt_sock_unregister(BTPROTO_L2CAP);
4045 goto error;
4046 }
4047
4048 if (bt_debugfs) {
4049 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4050 bt_debugfs, NULL, &l2cap_debugfs_fops);
4051 if (!l2cap_debugfs)
4052 BT_ERR("Failed to create L2CAP debug file");
4053 }
4054
4055 BT_INFO("L2CAP ver %s", VERSION);
4056 BT_INFO("L2CAP socket layer initialized");
4057
4058 return 0;
4059
4060error:
4061 proto_unregister(&l2cap_proto);
4062 return err;
4063}
4064
4065static void __exit l2cap_exit(void)
4066{
4067 debugfs_remove(l2cap_debugfs);
4068
4069 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4070 BT_ERR("L2CAP socket unregistration failed");
4071
4072 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4073 BT_ERR("L2CAP protocol unregistration failed");
4074
4075 proto_unregister(&l2cap_proto);
4076}
4077
4078void l2cap_load(void)
4079{
4080 /* Dummy function to trigger automatic L2CAP module loading by
4081 * other modules that use L2CAP sockets but don't use any other
4082 * symbols from it. */
4083 return;
4084}
4085EXPORT_SYMBOL(l2cap_load);
4086
4087module_init(l2cap_init);
4088module_exit(l2cap_exit);
4089
4090module_param(enable_ertm, bool, 0644);
4091MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4092
4093module_param(max_transmit, uint, 0644);
4094MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4095
4096MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4097MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4098MODULE_VERSION(VERSION);
4099MODULE_LICENSE("GPL");
4100MODULE_ALIAS("bt-proto-0");