2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
/* Module parameter: set non-zero to disable ERTM/streaming modes.
 * Kernel style: no explicit "= 0" initializer for static data. */
static int disable_ertm;
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops
;
67 static struct workqueue_struct
*_busy_wq
;
69 static struct bt_sock_list l2cap_sk_list
= {
70 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
73 static void l2cap_busy_work(struct work_struct
*work
);
75 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
76 static void l2cap_sock_close(struct sock
*sk
);
77 static void l2cap_sock_kill(struct sock
*sk
);
79 static int l2cap_build_conf_req(struct sock
*sk
, void *data
);
80 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
81 u8 code
, u8 ident
, u16 dlen
, void *data
);
83 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
85 /* ---- L2CAP timers ---- */
/* sk_timer callback: close the socket with a reason derived from its
 * state.  A connected/configuring or security-elevated connecting socket
 * reports ECONNREFUSED; otherwise the close reason is a plain timeout.
 * NOTE(review): locking and the ETIMEDOUT default were reconstructed from
 * a garbled extraction — verify against upstream l2cap.c. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
111 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
113 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
114 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
117 static void l2cap_sock_clear_timer(struct sock
*sk
)
119 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
120 sk_stop_timer(sk
, &sk
->sk_timer
);
123 /* ---- L2CAP channels ---- */
124 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
127 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
128 if (l2cap_pi(s
)->dcid
== cid
)
134 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
137 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
138 if (l2cap_pi(s
)->scid
== cid
)
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
150 s
= __l2cap_get_chan_by_scid(l
, cid
);
153 read_unlock(&l
->lock
);
157 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
160 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
161 if (l2cap_pi(s
)->ident
== ident
)
167 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
171 s
= __l2cap_get_chan_by_ident(l
, ident
);
174 read_unlock(&l
->lock
);
178 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
180 u16 cid
= L2CAP_CID_DYN_START
;
182 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
183 if (!__l2cap_get_chan_by_scid(l
, cid
))
190 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
195 l2cap_pi(l
->head
)->prev_c
= sk
;
197 l2cap_pi(sk
)->next_c
= l
->head
;
198 l2cap_pi(sk
)->prev_c
= NULL
;
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
204 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
206 write_lock_bh(&l
->lock
);
211 l2cap_pi(next
)->prev_c
= prev
;
213 l2cap_pi(prev
)->next_c
= next
;
214 write_unlock_bh(&l
->lock
);
219 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
221 struct l2cap_chan_list
*l
= &conn
->chan_list
;
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
224 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
226 conn
->disc_reason
= 0x13;
228 l2cap_pi(sk
)->conn
= conn
;
230 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
233 } else if (sk
->sk_type
== SOCK_DGRAM
) {
234 /* Connectionless socket */
235 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
236 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
237 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
241 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
242 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
245 __l2cap_chan_link(l
, sk
);
248 bt_accept_enqueue(parent
, sk
);
252 * Must be called on the locked socket. */
253 static void l2cap_chan_del(struct sock
*sk
, int err
)
255 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
256 struct sock
*parent
= bt_sk(sk
)->parent
;
258 l2cap_sock_clear_timer(sk
);
260 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn
->chan_list
, sk
);
265 l2cap_pi(sk
)->conn
= NULL
;
266 hci_conn_put(conn
->hcon
);
269 sk
->sk_state
= BT_CLOSED
;
270 sock_set_flag(sk
, SOCK_ZAPPED
);
276 bt_accept_unlink(sk
);
277 parent
->sk_data_ready(parent
, 0);
279 sk
->sk_state_change(sk
);
281 skb_queue_purge(TX_QUEUE(sk
));
283 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
284 struct srej_list
*l
, *tmp
;
286 del_timer(&l2cap_pi(sk
)->retrans_timer
);
287 del_timer(&l2cap_pi(sk
)->monitor_timer
);
288 del_timer(&l2cap_pi(sk
)->ack_timer
);
290 skb_queue_purge(SREJ_QUEUE(sk
));
291 skb_queue_purge(BUSY_QUEUE(sk
));
293 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
300 /* Service level security */
301 static inline int l2cap_check_security(struct sock
*sk
)
303 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
306 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
308 auth_type
= HCI_AT_NO_BONDING_MITM
;
310 auth_type
= HCI_AT_NO_BONDING
;
312 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
313 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
315 switch (l2cap_pi(sk
)->sec_level
) {
316 case BT_SECURITY_HIGH
:
317 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
319 case BT_SECURITY_MEDIUM
:
320 auth_type
= HCI_AT_GENERAL_BONDING
;
323 auth_type
= HCI_AT_NO_BONDING
;
328 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
332 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
342 spin_lock_bh(&conn
->lock
);
344 if (++conn
->tx_ident
> 128)
349 spin_unlock_bh(&conn
->lock
);
354 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
356 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
358 BT_DBG("code 0x%2.2x", code
);
363 hci_send_acl(conn
->hcon
, skb
, 0);
366 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
369 struct l2cap_hdr
*lh
;
370 struct l2cap_conn
*conn
= pi
->conn
;
371 struct sock
*sk
= (struct sock
*)pi
;
372 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
374 if (sk
->sk_state
!= BT_CONNECTED
)
377 if (pi
->fcs
== L2CAP_FCS_CRC16
)
380 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
382 count
= min_t(unsigned int, conn
->mtu
, hlen
);
383 control
|= L2CAP_CTRL_FRAME_TYPE
;
385 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
386 control
|= L2CAP_CTRL_FINAL
;
387 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
390 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
391 control
|= L2CAP_CTRL_POLL
;
392 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
395 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
399 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
400 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
401 lh
->cid
= cpu_to_le16(pi
->dcid
);
402 put_unaligned_le16(control
, skb_put(skb
, 2));
404 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
405 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
406 put_unaligned_le16(fcs
, skb_put(skb
, 2));
409 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
414 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
415 control
|= L2CAP_SUPER_RCV_NOT_READY
;
416 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
418 control
|= L2CAP_SUPER_RCV_READY
;
420 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
422 l2cap_send_sframe(pi
, control
);
425 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
427 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
430 static void l2cap_do_start(struct sock
*sk
)
432 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
434 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
435 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
438 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
439 struct l2cap_conn_req req
;
440 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
441 req
.psm
= l2cap_pi(sk
)->psm
;
443 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
444 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
446 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
447 L2CAP_CONN_REQ
, sizeof(req
), &req
);
450 struct l2cap_info_req req
;
451 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
453 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
454 conn
->info_ident
= l2cap_get_ident(conn
);
456 mod_timer(&conn
->info_timer
, jiffies
+
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
459 l2cap_send_cmd(conn
, conn
->info_ident
,
460 L2CAP_INFO_REQ
, sizeof(req
), &req
);
464 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
466 u32 local_feat_mask
= l2cap_feat_mask
;
468 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
471 case L2CAP_MODE_ERTM
:
472 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
473 case L2CAP_MODE_STREAMING
:
474 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
480 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
482 struct l2cap_disconn_req req
;
487 skb_queue_purge(TX_QUEUE(sk
));
489 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
490 del_timer(&l2cap_pi(sk
)->retrans_timer
);
491 del_timer(&l2cap_pi(sk
)->monitor_timer
);
492 del_timer(&l2cap_pi(sk
)->ack_timer
);
495 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
496 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
497 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
498 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
500 sk
->sk_state
= BT_DISCONN
;
504 /* ---- L2CAP connections ---- */
505 static void l2cap_conn_start(struct l2cap_conn
*conn
)
507 struct l2cap_chan_list
*l
= &conn
->chan_list
;
508 struct sock_del_list del
, *tmp1
, *tmp2
;
511 BT_DBG("conn %p", conn
);
513 INIT_LIST_HEAD(&del
.list
);
517 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
520 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
521 sk
->sk_type
!= SOCK_STREAM
) {
526 if (sk
->sk_state
== BT_CONNECT
) {
527 struct l2cap_conn_req req
;
529 if (!l2cap_check_security(sk
) ||
530 !__l2cap_no_conn_pending(sk
)) {
535 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
537 && l2cap_pi(sk
)->conf_state
&
538 L2CAP_CONF_STATE2_DEVICE
) {
539 tmp1
= kzalloc(sizeof(struct sock_del_list
),
542 list_add_tail(&tmp1
->list
, &del
.list
);
547 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
548 req
.psm
= l2cap_pi(sk
)->psm
;
550 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
551 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
553 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
554 L2CAP_CONN_REQ
, sizeof(req
), &req
);
556 } else if (sk
->sk_state
== BT_CONNECT2
) {
557 struct l2cap_conn_rsp rsp
;
559 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
560 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
562 if (l2cap_check_security(sk
)) {
563 if (bt_sk(sk
)->defer_setup
) {
564 struct sock
*parent
= bt_sk(sk
)->parent
;
565 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
566 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
567 parent
->sk_data_ready(parent
, 0);
570 sk
->sk_state
= BT_CONFIG
;
571 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
572 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
575 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
576 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
579 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
580 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
582 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
583 rsp
.result
!= L2CAP_CR_SUCCESS
) {
588 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
589 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
590 l2cap_build_conf_req(sk
, buf
), buf
);
591 l2cap_pi(sk
)->num_conf_req
++;
597 read_unlock(&l
->lock
);
599 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
600 bh_lock_sock(tmp1
->sk
);
601 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
602 bh_unlock_sock(tmp1
->sk
);
603 list_del(&tmp1
->list
);
608 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
610 struct l2cap_chan_list
*l
= &conn
->chan_list
;
613 BT_DBG("conn %p", conn
);
617 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
620 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
621 sk
->sk_type
!= SOCK_STREAM
) {
622 l2cap_sock_clear_timer(sk
);
623 sk
->sk_state
= BT_CONNECTED
;
624 sk
->sk_state_change(sk
);
625 } else if (sk
->sk_state
== BT_CONNECT
)
631 read_unlock(&l
->lock
);
634 /* Notify sockets that we cannot guaranty reliability anymore */
635 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
637 struct l2cap_chan_list
*l
= &conn
->chan_list
;
640 BT_DBG("conn %p", conn
);
644 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
645 if (l2cap_pi(sk
)->force_reliable
)
649 read_unlock(&l
->lock
);
652 static void l2cap_info_timeout(unsigned long arg
)
654 struct l2cap_conn
*conn
= (void *) arg
;
656 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
657 conn
->info_ident
= 0;
659 l2cap_conn_start(conn
);
662 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
664 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
669 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
673 hcon
->l2cap_data
= conn
;
676 BT_DBG("hcon %p conn %p", hcon
, conn
);
678 conn
->mtu
= hcon
->hdev
->acl_mtu
;
679 conn
->src
= &hcon
->hdev
->bdaddr
;
680 conn
->dst
= &hcon
->dst
;
684 spin_lock_init(&conn
->lock
);
685 rwlock_init(&conn
->chan_list
.lock
);
687 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
688 (unsigned long) conn
);
690 conn
->disc_reason
= 0x13;
695 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
697 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
703 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
705 kfree_skb(conn
->rx_skb
);
708 while ((sk
= conn
->chan_list
.head
)) {
710 l2cap_chan_del(sk
, err
);
715 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
716 del_timer_sync(&conn
->info_timer
);
718 hcon
->l2cap_data
= NULL
;
722 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
724 struct l2cap_chan_list
*l
= &conn
->chan_list
;
725 write_lock_bh(&l
->lock
);
726 __l2cap_chan_add(conn
, sk
, parent
);
727 write_unlock_bh(&l
->lock
);
730 /* ---- Socket interface ---- */
731 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
734 struct hlist_node
*node
;
735 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
736 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
746 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
748 struct sock
*sk
= NULL
, *sk1
= NULL
;
749 struct hlist_node
*node
;
751 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
752 if (state
&& sk
->sk_state
!= state
)
755 if (l2cap_pi(sk
)->psm
== psm
) {
757 if (!bacmp(&bt_sk(sk
)->src
, src
))
761 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
765 return node
? sk
: sk1
;
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
773 read_lock(&l2cap_sk_list
.lock
);
774 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
777 read_unlock(&l2cap_sk_list
.lock
);
781 static void l2cap_sock_destruct(struct sock
*sk
)
785 skb_queue_purge(&sk
->sk_receive_queue
);
786 skb_queue_purge(&sk
->sk_write_queue
);
789 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
793 BT_DBG("parent %p", parent
);
795 /* Close not yet accepted channels */
796 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
797 l2cap_sock_close(sk
);
799 parent
->sk_state
= BT_CLOSED
;
800 sock_set_flag(parent
, SOCK_ZAPPED
);
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
806 static void l2cap_sock_kill(struct sock
*sk
)
808 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
811 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list
, sk
);
815 sock_set_flag(sk
, SOCK_DEAD
);
819 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
821 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
823 switch (sk
->sk_state
) {
825 l2cap_sock_cleanup_listen(sk
);
830 if (sk
->sk_type
== SOCK_SEQPACKET
||
831 sk
->sk_type
== SOCK_STREAM
) {
832 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
834 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
835 l2cap_send_disconn_req(conn
, sk
, reason
);
837 l2cap_chan_del(sk
, reason
);
841 if (sk
->sk_type
== SOCK_SEQPACKET
||
842 sk
->sk_type
== SOCK_STREAM
) {
843 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
844 struct l2cap_conn_rsp rsp
;
847 if (bt_sk(sk
)->defer_setup
)
848 result
= L2CAP_CR_SEC_BLOCK
;
850 result
= L2CAP_CR_BAD_PSM
;
852 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
853 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
854 rsp
.result
= cpu_to_le16(result
);
855 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
856 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
857 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
859 l2cap_chan_del(sk
, reason
);
864 l2cap_chan_del(sk
, reason
);
868 sock_set_flag(sk
, SOCK_ZAPPED
);
873 /* Must be called on unlocked socket. */
874 static void l2cap_sock_close(struct sock
*sk
)
876 l2cap_sock_clear_timer(sk
);
878 __l2cap_sock_close(sk
, ECONNRESET
);
883 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
885 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
890 sk
->sk_type
= parent
->sk_type
;
891 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
893 pi
->imtu
= l2cap_pi(parent
)->imtu
;
894 pi
->omtu
= l2cap_pi(parent
)->omtu
;
895 pi
->conf_state
= l2cap_pi(parent
)->conf_state
;
896 pi
->mode
= l2cap_pi(parent
)->mode
;
897 pi
->fcs
= l2cap_pi(parent
)->fcs
;
898 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
899 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
900 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
901 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
902 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
904 pi
->imtu
= L2CAP_DEFAULT_MTU
;
906 if (!disable_ertm
&& sk
->sk_type
== SOCK_STREAM
) {
907 pi
->mode
= L2CAP_MODE_ERTM
;
908 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
910 pi
->mode
= L2CAP_MODE_BASIC
;
912 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
913 pi
->fcs
= L2CAP_FCS_CRC16
;
914 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
915 pi
->sec_level
= BT_SECURITY_LOW
;
917 pi
->force_reliable
= 0;
920 /* Default config options */
922 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
923 skb_queue_head_init(TX_QUEUE(sk
));
924 skb_queue_head_init(SREJ_QUEUE(sk
));
925 skb_queue_head_init(BUSY_QUEUE(sk
));
926 INIT_LIST_HEAD(SREJ_LIST(sk
));
929 static struct proto l2cap_proto
= {
931 .owner
= THIS_MODULE
,
932 .obj_size
= sizeof(struct l2cap_pinfo
)
935 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
939 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
943 sock_init_data(sock
, sk
);
944 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
946 sk
->sk_destruct
= l2cap_sock_destruct
;
947 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
949 sock_reset_flag(sk
, SOCK_ZAPPED
);
951 sk
->sk_protocol
= proto
;
952 sk
->sk_state
= BT_OPEN
;
954 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
956 bt_sock_link(&l2cap_sk_list
, sk
);
960 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
965 BT_DBG("sock %p", sock
);
967 sock
->state
= SS_UNCONNECTED
;
969 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
970 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
971 return -ESOCKTNOSUPPORT
;
973 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
976 sock
->ops
= &l2cap_sock_ops
;
978 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
982 l2cap_sock_init(sk
, NULL
);
986 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
988 struct sock
*sk
= sock
->sk
;
989 struct sockaddr_l2 la
;
994 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
997 memset(&la
, 0, sizeof(la
));
998 len
= min_t(unsigned int, sizeof(la
), alen
);
999 memcpy(&la
, addr
, len
);
1006 if (sk
->sk_state
!= BT_OPEN
) {
1012 __u16 psm
= __le16_to_cpu(la
.l2_psm
);
1014 /* PSM must be odd and lsb of upper byte must be 0 */
1015 if ((psm
& 0x0101) != 0x0001) {
1020 /* Restrict usage of well-known PSMs */
1021 if (psm
< 0x1001 && !capable(CAP_NET_BIND_SERVICE
)) {
1027 write_lock_bh(&l2cap_sk_list
.lock
);
1029 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
1032 /* Save source address */
1033 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
1034 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1035 l2cap_pi(sk
)->sport
= la
.l2_psm
;
1036 sk
->sk_state
= BT_BOUND
;
1038 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
1039 __le16_to_cpu(la
.l2_psm
) == 0x0003)
1040 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1043 write_unlock_bh(&l2cap_sk_list
.lock
);
1050 static int l2cap_do_connect(struct sock
*sk
)
1052 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1053 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1054 struct l2cap_conn
*conn
;
1055 struct hci_conn
*hcon
;
1056 struct hci_dev
*hdev
;
1060 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1063 hdev
= hci_get_route(dst
, src
);
1065 return -EHOSTUNREACH
;
1067 hci_dev_lock_bh(hdev
);
1071 if (sk
->sk_type
== SOCK_RAW
) {
1072 switch (l2cap_pi(sk
)->sec_level
) {
1073 case BT_SECURITY_HIGH
:
1074 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
1076 case BT_SECURITY_MEDIUM
:
1077 auth_type
= HCI_AT_DEDICATED_BONDING
;
1080 auth_type
= HCI_AT_NO_BONDING
;
1083 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
1084 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
1085 auth_type
= HCI_AT_NO_BONDING_MITM
;
1087 auth_type
= HCI_AT_NO_BONDING
;
1089 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
1090 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1092 switch (l2cap_pi(sk
)->sec_level
) {
1093 case BT_SECURITY_HIGH
:
1094 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
1096 case BT_SECURITY_MEDIUM
:
1097 auth_type
= HCI_AT_GENERAL_BONDING
;
1100 auth_type
= HCI_AT_NO_BONDING
;
1105 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1106 l2cap_pi(sk
)->sec_level
, auth_type
);
1110 conn
= l2cap_conn_add(hcon
, 0);
1118 /* Update source addr of the socket */
1119 bacpy(src
, conn
->src
);
1121 l2cap_chan_add(conn
, sk
, NULL
);
1123 sk
->sk_state
= BT_CONNECT
;
1124 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1126 if (hcon
->state
== BT_CONNECTED
) {
1127 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1128 sk
->sk_type
!= SOCK_STREAM
) {
1129 l2cap_sock_clear_timer(sk
);
1130 sk
->sk_state
= BT_CONNECTED
;
1136 hci_dev_unlock_bh(hdev
);
1141 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1143 struct sock
*sk
= sock
->sk
;
1144 struct sockaddr_l2 la
;
1147 BT_DBG("sk %p", sk
);
1149 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1150 addr
->sa_family
!= AF_BLUETOOTH
)
1153 memset(&la
, 0, sizeof(la
));
1154 len
= min_t(unsigned int, sizeof(la
), alen
);
1155 memcpy(&la
, addr
, len
);
1162 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1168 switch (l2cap_pi(sk
)->mode
) {
1169 case L2CAP_MODE_BASIC
:
1171 case L2CAP_MODE_ERTM
:
1172 case L2CAP_MODE_STREAMING
:
1181 switch (sk
->sk_state
) {
1185 /* Already connecting */
1189 /* Already connected */
1203 /* PSM must be odd and lsb of upper byte must be 0 */
1204 if ((__le16_to_cpu(la
.l2_psm
) & 0x0101) != 0x0001 &&
1205 sk
->sk_type
!= SOCK_RAW
) {
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1212 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1214 err
= l2cap_do_connect(sk
);
1219 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1220 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1226 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1228 struct sock
*sk
= sock
->sk
;
1231 BT_DBG("sk %p backlog %d", sk
, backlog
);
1235 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1236 || sk
->sk_state
!= BT_BOUND
) {
1241 switch (l2cap_pi(sk
)->mode
) {
1242 case L2CAP_MODE_BASIC
:
1244 case L2CAP_MODE_ERTM
:
1245 case L2CAP_MODE_STREAMING
:
1254 if (!l2cap_pi(sk
)->psm
) {
1255 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1260 write_lock_bh(&l2cap_sk_list
.lock
);
1262 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1263 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1264 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1265 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1270 write_unlock_bh(&l2cap_sk_list
.lock
);
1276 sk
->sk_max_ack_backlog
= backlog
;
1277 sk
->sk_ack_backlog
= 0;
1278 sk
->sk_state
= BT_LISTEN
;
1285 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1287 DECLARE_WAITQUEUE(wait
, current
);
1288 struct sock
*sk
= sock
->sk
, *nsk
;
1292 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1294 if (sk
->sk_state
!= BT_LISTEN
) {
1299 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1301 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1303 /* Wait for an incoming connection. (wake-one). */
1304 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1305 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1306 set_current_state(TASK_INTERRUPTIBLE
);
1313 timeo
= schedule_timeout(timeo
);
1314 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1316 if (sk
->sk_state
!= BT_LISTEN
) {
1321 if (signal_pending(current
)) {
1322 err
= sock_intr_errno(timeo
);
1326 set_current_state(TASK_RUNNING
);
1327 remove_wait_queue(sk_sleep(sk
), &wait
);
1332 newsock
->state
= SS_CONNECTED
;
1334 BT_DBG("new socket %p", nsk
);
1341 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1343 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1344 struct sock
*sk
= sock
->sk
;
1346 BT_DBG("sock %p, sk %p", sock
, sk
);
1348 addr
->sa_family
= AF_BLUETOOTH
;
1349 *len
= sizeof(struct sockaddr_l2
);
1352 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1353 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1354 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1356 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1357 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1358 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1364 static int __l2cap_wait_ack(struct sock
*sk
)
1366 DECLARE_WAITQUEUE(wait
, current
);
1370 add_wait_queue(sk_sleep(sk
), &wait
);
1371 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1372 set_current_state(TASK_INTERRUPTIBLE
);
1377 if (signal_pending(current
)) {
1378 err
= sock_intr_errno(timeo
);
1383 timeo
= schedule_timeout(timeo
);
1386 err
= sock_error(sk
);
1390 set_current_state(TASK_RUNNING
);
1391 remove_wait_queue(sk_sleep(sk
), &wait
);
1395 static void l2cap_monitor_timeout(unsigned long arg
)
1397 struct sock
*sk
= (void *) arg
;
1399 BT_DBG("sk %p", sk
);
1402 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1403 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1408 l2cap_pi(sk
)->retry_count
++;
1409 __mod_monitor_timer();
1411 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1415 static void l2cap_retrans_timeout(unsigned long arg
)
1417 struct sock
*sk
= (void *) arg
;
1419 BT_DBG("sk %p", sk
);
1422 l2cap_pi(sk
)->retry_count
= 1;
1423 __mod_monitor_timer();
1425 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1427 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1431 static void l2cap_drop_acked_frames(struct sock
*sk
)
1433 struct sk_buff
*skb
;
1435 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1436 l2cap_pi(sk
)->unacked_frames
) {
1437 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1440 skb
= skb_dequeue(TX_QUEUE(sk
));
1443 l2cap_pi(sk
)->unacked_frames
--;
1446 if (!l2cap_pi(sk
)->unacked_frames
)
1447 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1450 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1452 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1454 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1456 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1459 static void l2cap_streaming_send(struct sock
*sk
)
1461 struct sk_buff
*skb
;
1462 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1465 while ((skb
= skb_dequeue(TX_QUEUE(sk
)))) {
1466 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1467 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1468 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1470 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1471 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1472 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1475 l2cap_do_send(sk
, skb
);
1477 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1481 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1483 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1484 struct sk_buff
*skb
, *tx_skb
;
1487 skb
= skb_peek(TX_QUEUE(sk
));
1492 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1495 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1498 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1500 if (pi
->remote_max_tx
&&
1501 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1502 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1506 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1507 bt_cb(skb
)->retries
++;
1508 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1510 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1511 control
|= L2CAP_CTRL_FINAL
;
1512 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1515 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1516 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1518 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1520 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1521 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1522 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1525 l2cap_do_send(sk
, tx_skb
);
1528 static int l2cap_ertm_send(struct sock
*sk
)
1530 struct sk_buff
*skb
, *tx_skb
;
1531 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1535 if (sk
->sk_state
!= BT_CONNECTED
)
1538 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1540 if (pi
->remote_max_tx
&&
1541 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1542 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1546 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1548 bt_cb(skb
)->retries
++;
1550 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1551 control
&= L2CAP_CTRL_SAR
;
1553 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1554 control
|= L2CAP_CTRL_FINAL
;
1555 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1557 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1558 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1559 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1562 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1563 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1564 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1567 l2cap_do_send(sk
, tx_skb
);
1569 __mod_retrans_timer();
1571 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1572 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1574 pi
->unacked_frames
++;
1577 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1578 sk
->sk_send_head
= NULL
;
1580 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1588 static int l2cap_retransmit_frames(struct sock
*sk
)
1590 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1593 if (!skb_queue_empty(TX_QUEUE(sk
)))
1594 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1596 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1597 ret
= l2cap_ertm_send(sk
);
1601 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1603 struct sock
*sk
= (struct sock
*)pi
;
1606 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1608 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1609 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1610 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1611 l2cap_send_sframe(pi
, control
);
1615 if (l2cap_ertm_send(sk
) > 0)
1618 control
|= L2CAP_SUPER_RCV_READY
;
1619 l2cap_send_sframe(pi
, control
);
1622 static void l2cap_send_srejtail(struct sock
*sk
)
1624 struct srej_list
*tail
;
1627 control
= L2CAP_SUPER_SELECT_REJECT
;
1628 control
|= L2CAP_CTRL_FINAL
;
1630 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1631 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1633 l2cap_send_sframe(l2cap_pi(sk
), control
);
1636 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1638 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1639 struct sk_buff
**frag
;
1642 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1648 /* Continuation fragments (no L2CAP header) */
1649 frag
= &skb_shinfo(skb
)->frag_list
;
1651 count
= min_t(unsigned int, conn
->mtu
, len
);
1653 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1656 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1662 frag
= &(*frag
)->next
;
1668 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1670 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1671 struct sk_buff
*skb
;
1672 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1673 struct l2cap_hdr
*lh
;
1675 BT_DBG("sk %p len %d", sk
, (int)len
);
1677 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1678 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1679 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1681 return ERR_PTR(-ENOMEM
);
1683 /* Create L2CAP header */
1684 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1685 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1686 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1687 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1689 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1690 if (unlikely(err
< 0)) {
1692 return ERR_PTR(err
);
1697 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1699 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1700 struct sk_buff
*skb
;
1701 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1702 struct l2cap_hdr
*lh
;
1704 BT_DBG("sk %p len %d", sk
, (int)len
);
1706 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1707 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1708 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1710 return ERR_PTR(-ENOMEM
);
1712 /* Create L2CAP header */
1713 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1714 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1715 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1717 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1718 if (unlikely(err
< 0)) {
1720 return ERR_PTR(err
);
1725 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1727 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1728 struct sk_buff
*skb
;
1729 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1730 struct l2cap_hdr
*lh
;
1732 BT_DBG("sk %p len %d", sk
, (int)len
);
1735 return ERR_PTR(-ENOTCONN
);
1740 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1743 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1744 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1745 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1747 return ERR_PTR(-ENOMEM
);
1749 /* Create L2CAP header */
1750 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1751 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1752 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1753 put_unaligned_le16(control
, skb_put(skb
, 2));
1755 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1757 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1758 if (unlikely(err
< 0)) {
1760 return ERR_PTR(err
);
1763 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1764 put_unaligned_le16(0, skb_put(skb
, 2));
1766 bt_cb(skb
)->retries
= 0;
1770 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1772 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1773 struct sk_buff
*skb
;
1774 struct sk_buff_head sar_queue
;
1778 skb_queue_head_init(&sar_queue
);
1779 control
= L2CAP_SDU_START
;
1780 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1782 return PTR_ERR(skb
);
1784 __skb_queue_tail(&sar_queue
, skb
);
1785 len
-= pi
->remote_mps
;
1786 size
+= pi
->remote_mps
;
1791 if (len
> pi
->remote_mps
) {
1792 control
= L2CAP_SDU_CONTINUE
;
1793 buflen
= pi
->remote_mps
;
1795 control
= L2CAP_SDU_END
;
1799 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1801 skb_queue_purge(&sar_queue
);
1802 return PTR_ERR(skb
);
1805 __skb_queue_tail(&sar_queue
, skb
);
1809 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1810 if (sk
->sk_send_head
== NULL
)
1811 sk
->sk_send_head
= sar_queue
.next
;
1816 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1818 struct sock
*sk
= sock
->sk
;
1819 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1820 struct sk_buff
*skb
;
1824 BT_DBG("sock %p, sk %p", sock
, sk
);
1826 err
= sock_error(sk
);
1830 if (msg
->msg_flags
& MSG_OOB
)
1835 if (sk
->sk_state
!= BT_CONNECTED
) {
1840 /* Connectionless channel */
1841 if (sk
->sk_type
== SOCK_DGRAM
) {
1842 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1846 l2cap_do_send(sk
, skb
);
1853 case L2CAP_MODE_BASIC
:
1854 /* Check outgoing MTU */
1855 if (len
> pi
->omtu
) {
1860 /* Create a basic PDU */
1861 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1867 l2cap_do_send(sk
, skb
);
1871 case L2CAP_MODE_ERTM
:
1872 case L2CAP_MODE_STREAMING
:
1873 /* Entire SDU fits into one PDU */
1874 if (len
<= pi
->remote_mps
) {
1875 control
= L2CAP_SDU_UNSEGMENTED
;
1876 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1881 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1883 if (sk
->sk_send_head
== NULL
)
1884 sk
->sk_send_head
= skb
;
1887 /* Segment SDU into multiples PDUs */
1888 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1893 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1894 l2cap_streaming_send(sk
);
1896 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&&
1897 pi
->conn_state
&& L2CAP_CONN_WAIT_F
) {
1901 err
= l2cap_ertm_send(sk
);
1909 BT_DBG("bad state %1.1x", pi
->mode
);
1918 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1920 struct sock
*sk
= sock
->sk
;
1924 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1925 struct l2cap_conn_rsp rsp
;
1926 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1929 sk
->sk_state
= BT_CONFIG
;
1931 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1932 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1933 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1934 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1935 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1936 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1938 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) {
1943 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1944 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1945 l2cap_build_conf_req(sk
, buf
), buf
);
1946 l2cap_pi(sk
)->num_conf_req
++;
1954 if (sock
->type
== SOCK_STREAM
)
1955 return bt_sock_stream_recvmsg(iocb
, sock
, msg
, len
, flags
);
1957 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1960 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1962 struct sock
*sk
= sock
->sk
;
1963 struct l2cap_options opts
;
1967 BT_DBG("sk %p", sk
);
1973 if (sk
->sk_state
== BT_CONNECTED
) {
1978 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1979 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1980 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1981 opts
.mode
= l2cap_pi(sk
)->mode
;
1982 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1983 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1984 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1986 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1987 if (copy_from_user((char *) &opts
, optval
, len
)) {
1992 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1997 l2cap_pi(sk
)->mode
= opts
.mode
;
1998 switch (l2cap_pi(sk
)->mode
) {
1999 case L2CAP_MODE_BASIC
:
2000 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
2002 case L2CAP_MODE_ERTM
:
2003 case L2CAP_MODE_STREAMING
:
2012 l2cap_pi(sk
)->imtu
= opts
.imtu
;
2013 l2cap_pi(sk
)->omtu
= opts
.omtu
;
2014 l2cap_pi(sk
)->fcs
= opts
.fcs
;
2015 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
2016 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
2020 if (get_user(opt
, (u32 __user
*) optval
)) {
2025 if (opt
& L2CAP_LM_AUTH
)
2026 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
2027 if (opt
& L2CAP_LM_ENCRYPT
)
2028 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
2029 if (opt
& L2CAP_LM_SECURE
)
2030 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
2032 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
2033 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
2045 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
2047 struct sock
*sk
= sock
->sk
;
2048 struct bt_security sec
;
2052 BT_DBG("sk %p", sk
);
2054 if (level
== SOL_L2CAP
)
2055 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
2057 if (level
!= SOL_BLUETOOTH
)
2058 return -ENOPROTOOPT
;
2064 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2065 && sk
->sk_type
!= SOCK_RAW
) {
2070 sec
.level
= BT_SECURITY_LOW
;
2072 len
= min_t(unsigned int, sizeof(sec
), optlen
);
2073 if (copy_from_user((char *) &sec
, optval
, len
)) {
2078 if (sec
.level
< BT_SECURITY_LOW
||
2079 sec
.level
> BT_SECURITY_HIGH
) {
2084 l2cap_pi(sk
)->sec_level
= sec
.level
;
2087 case BT_DEFER_SETUP
:
2088 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2093 if (get_user(opt
, (u32 __user
*) optval
)) {
2098 bt_sk(sk
)->defer_setup
= opt
;
2110 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2112 struct sock
*sk
= sock
->sk
;
2113 struct l2cap_options opts
;
2114 struct l2cap_conninfo cinfo
;
2118 BT_DBG("sk %p", sk
);
2120 if (get_user(len
, optlen
))
2127 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2128 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2129 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2130 opts
.mode
= l2cap_pi(sk
)->mode
;
2131 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2132 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2133 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2135 len
= min_t(unsigned int, len
, sizeof(opts
));
2136 if (copy_to_user(optval
, (char *) &opts
, len
))
2142 switch (l2cap_pi(sk
)->sec_level
) {
2143 case BT_SECURITY_LOW
:
2144 opt
= L2CAP_LM_AUTH
;
2146 case BT_SECURITY_MEDIUM
:
2147 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2149 case BT_SECURITY_HIGH
:
2150 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2158 if (l2cap_pi(sk
)->role_switch
)
2159 opt
|= L2CAP_LM_MASTER
;
2161 if (l2cap_pi(sk
)->force_reliable
)
2162 opt
|= L2CAP_LM_RELIABLE
;
2164 if (put_user(opt
, (u32 __user
*) optval
))
2168 case L2CAP_CONNINFO
:
2169 if (sk
->sk_state
!= BT_CONNECTED
&&
2170 !(sk
->sk_state
== BT_CONNECT2
&&
2171 bt_sk(sk
)->defer_setup
)) {
2176 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2177 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2179 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2180 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2194 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2196 struct sock
*sk
= sock
->sk
;
2197 struct bt_security sec
;
2200 BT_DBG("sk %p", sk
);
2202 if (level
== SOL_L2CAP
)
2203 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2205 if (level
!= SOL_BLUETOOTH
)
2206 return -ENOPROTOOPT
;
2208 if (get_user(len
, optlen
))
2215 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2216 && sk
->sk_type
!= SOCK_RAW
) {
2221 sec
.level
= l2cap_pi(sk
)->sec_level
;
2223 len
= min_t(unsigned int, len
, sizeof(sec
));
2224 if (copy_to_user(optval
, (char *) &sec
, len
))
2229 case BT_DEFER_SETUP
:
2230 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2235 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2249 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2251 struct sock
*sk
= sock
->sk
;
2254 BT_DBG("sock %p, sk %p", sock
, sk
);
2260 if (!sk
->sk_shutdown
) {
2261 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2262 err
= __l2cap_wait_ack(sk
);
2264 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2265 l2cap_sock_clear_timer(sk
);
2266 __l2cap_sock_close(sk
, 0);
2268 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2269 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2273 if (!err
&& sk
->sk_err
)
2280 static int l2cap_sock_release(struct socket
*sock
)
2282 struct sock
*sk
= sock
->sk
;
2285 BT_DBG("sock %p, sk %p", sock
, sk
);
2290 err
= l2cap_sock_shutdown(sock
, 2);
2293 l2cap_sock_kill(sk
);
2297 static void l2cap_chan_ready(struct sock
*sk
)
2299 struct sock
*parent
= bt_sk(sk
)->parent
;
2301 BT_DBG("sk %p, parent %p", sk
, parent
);
2303 l2cap_pi(sk
)->conf_state
= 0;
2304 l2cap_sock_clear_timer(sk
);
2307 /* Outgoing channel.
2308 * Wake up socket sleeping on connect.
2310 sk
->sk_state
= BT_CONNECTED
;
2311 sk
->sk_state_change(sk
);
2313 /* Incoming channel.
2314 * Wake up socket sleeping on accept.
2316 parent
->sk_data_ready(parent
, 0);
2320 /* Copy frame to all raw sockets on that connection */
2321 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2323 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2324 struct sk_buff
*nskb
;
2327 BT_DBG("conn %p", conn
);
2329 read_lock(&l
->lock
);
2330 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2331 if (sk
->sk_type
!= SOCK_RAW
)
2334 /* Don't send frame to the socket it came from */
2337 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2341 if (sock_queue_rcv_skb(sk
, nskb
))
2344 read_unlock(&l
->lock
);
2347 /* ---- L2CAP signalling commands ---- */
2348 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2349 u8 code
, u8 ident
, u16 dlen
, void *data
)
2351 struct sk_buff
*skb
, **frag
;
2352 struct l2cap_cmd_hdr
*cmd
;
2353 struct l2cap_hdr
*lh
;
2356 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2357 conn
, code
, ident
, dlen
);
2359 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2360 count
= min_t(unsigned int, conn
->mtu
, len
);
2362 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2366 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2367 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2368 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2370 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2373 cmd
->len
= cpu_to_le16(dlen
);
2376 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2377 memcpy(skb_put(skb
, count
), data
, count
);
2383 /* Continuation fragments (no L2CAP header) */
2384 frag
= &skb_shinfo(skb
)->frag_list
;
2386 count
= min_t(unsigned int, conn
->mtu
, len
);
2388 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2392 memcpy(skb_put(*frag
, count
), data
, count
);
2397 frag
= &(*frag
)->next
;
2407 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2409 struct l2cap_conf_opt
*opt
= *ptr
;
2412 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2420 *val
= *((u8
*) opt
->val
);
2424 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2428 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2432 *val
= (unsigned long) opt
->val
;
2436 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2440 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2442 struct l2cap_conf_opt
*opt
= *ptr
;
2444 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2451 *((u8
*) opt
->val
) = val
;
2455 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2459 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2463 memcpy(opt
->val
, (void *) val
, len
);
2467 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2470 static void l2cap_ack_timeout(unsigned long arg
)
2472 struct sock
*sk
= (void *) arg
;
2475 l2cap_send_ack(l2cap_pi(sk
));
2479 static inline void l2cap_ertm_init(struct sock
*sk
)
2481 l2cap_pi(sk
)->expected_ack_seq
= 0;
2482 l2cap_pi(sk
)->unacked_frames
= 0;
2483 l2cap_pi(sk
)->buffer_seq
= 0;
2484 l2cap_pi(sk
)->num_acked
= 0;
2485 l2cap_pi(sk
)->frames_sent
= 0;
2487 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2488 l2cap_retrans_timeout
, (unsigned long) sk
);
2489 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2490 l2cap_monitor_timeout
, (unsigned long) sk
);
2491 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2492 l2cap_ack_timeout
, (unsigned long) sk
);
2494 __skb_queue_head_init(SREJ_QUEUE(sk
));
2495 __skb_queue_head_init(BUSY_QUEUE(sk
));
2497 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2499 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
2502 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2505 case L2CAP_MODE_STREAMING
:
2506 case L2CAP_MODE_ERTM
:
2507 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2511 return L2CAP_MODE_BASIC
;
2515 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2517 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2518 struct l2cap_conf_req
*req
= data
;
2519 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2520 void *ptr
= req
->data
;
2522 BT_DBG("sk %p", sk
);
2524 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2528 case L2CAP_MODE_STREAMING
:
2529 case L2CAP_MODE_ERTM
:
2530 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
2535 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2541 case L2CAP_MODE_BASIC
:
2542 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2543 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2545 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2546 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2549 rfc
.mode
= L2CAP_MODE_BASIC
;
2551 rfc
.max_transmit
= 0;
2552 rfc
.retrans_timeout
= 0;
2553 rfc
.monitor_timeout
= 0;
2554 rfc
.max_pdu_size
= 0;
2556 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2557 (unsigned long) &rfc
);
2560 case L2CAP_MODE_ERTM
:
2561 rfc
.mode
= L2CAP_MODE_ERTM
;
2562 rfc
.txwin_size
= pi
->tx_win
;
2563 rfc
.max_transmit
= pi
->max_tx
;
2564 rfc
.retrans_timeout
= 0;
2565 rfc
.monitor_timeout
= 0;
2566 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2567 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2568 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2570 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2571 (unsigned long) &rfc
);
2573 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2576 if (pi
->fcs
== L2CAP_FCS_NONE
||
2577 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2578 pi
->fcs
= L2CAP_FCS_NONE
;
2579 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2583 case L2CAP_MODE_STREAMING
:
2584 rfc
.mode
= L2CAP_MODE_STREAMING
;
2586 rfc
.max_transmit
= 0;
2587 rfc
.retrans_timeout
= 0;
2588 rfc
.monitor_timeout
= 0;
2589 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2590 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2591 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2593 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2594 (unsigned long) &rfc
);
2596 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2599 if (pi
->fcs
== L2CAP_FCS_NONE
||
2600 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2601 pi
->fcs
= L2CAP_FCS_NONE
;
2602 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2607 /* FIXME: Need actual value of the flush timeout */
2608 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2609 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2611 req
->dcid
= cpu_to_le16(pi
->dcid
);
2612 req
->flags
= cpu_to_le16(0);
2617 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2619 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2620 struct l2cap_conf_rsp
*rsp
= data
;
2621 void *ptr
= rsp
->data
;
2622 void *req
= pi
->conf_req
;
2623 int len
= pi
->conf_len
;
2624 int type
, hint
, olen
;
2626 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2627 u16 mtu
= L2CAP_DEFAULT_MTU
;
2628 u16 result
= L2CAP_CONF_SUCCESS
;
2630 BT_DBG("sk %p", sk
);
2632 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2633 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2635 hint
= type
& L2CAP_CONF_HINT
;
2636 type
&= L2CAP_CONF_MASK
;
2639 case L2CAP_CONF_MTU
:
2643 case L2CAP_CONF_FLUSH_TO
:
2647 case L2CAP_CONF_QOS
:
2650 case L2CAP_CONF_RFC
:
2651 if (olen
== sizeof(rfc
))
2652 memcpy(&rfc
, (void *) val
, olen
);
2655 case L2CAP_CONF_FCS
:
2656 if (val
== L2CAP_FCS_NONE
)
2657 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2665 result
= L2CAP_CONF_UNKNOWN
;
2666 *((u8
*) ptr
++) = type
;
2671 if (pi
->num_conf_rsp
|| pi
->num_conf_req
> 1)
2675 case L2CAP_MODE_STREAMING
:
2676 case L2CAP_MODE_ERTM
:
2677 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2678 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2679 pi
->conn
->feat_mask
);
2683 if (pi
->mode
!= rfc
.mode
)
2684 return -ECONNREFUSED
;
2690 if (pi
->mode
!= rfc
.mode
) {
2691 result
= L2CAP_CONF_UNACCEPT
;
2692 rfc
.mode
= pi
->mode
;
2694 if (pi
->num_conf_rsp
== 1)
2695 return -ECONNREFUSED
;
2697 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2698 sizeof(rfc
), (unsigned long) &rfc
);
2702 if (result
== L2CAP_CONF_SUCCESS
) {
2703 /* Configure output options and let the other side know
2704 * which ones we don't like. */
2706 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2707 result
= L2CAP_CONF_UNACCEPT
;
2710 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2712 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2715 case L2CAP_MODE_BASIC
:
2716 pi
->fcs
= L2CAP_FCS_NONE
;
2717 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2720 case L2CAP_MODE_ERTM
:
2721 pi
->remote_tx_win
= rfc
.txwin_size
;
2722 pi
->remote_max_tx
= rfc
.max_transmit
;
2724 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2725 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2727 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2729 rfc
.retrans_timeout
=
2730 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2731 rfc
.monitor_timeout
=
2732 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2734 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2736 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2737 sizeof(rfc
), (unsigned long) &rfc
);
2741 case L2CAP_MODE_STREAMING
:
2742 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2743 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2745 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2747 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2749 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2750 sizeof(rfc
), (unsigned long) &rfc
);
2755 result
= L2CAP_CONF_UNACCEPT
;
2757 memset(&rfc
, 0, sizeof(rfc
));
2758 rfc
.mode
= pi
->mode
;
2761 if (result
== L2CAP_CONF_SUCCESS
)
2762 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2764 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2765 rsp
->result
= cpu_to_le16(result
);
2766 rsp
->flags
= cpu_to_le16(0x0000);
2771 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2773 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2774 struct l2cap_conf_req
*req
= data
;
2775 void *ptr
= req
->data
;
2778 struct l2cap_conf_rfc rfc
;
2780 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2782 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2783 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2786 case L2CAP_CONF_MTU
:
2787 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2788 *result
= L2CAP_CONF_UNACCEPT
;
2789 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2792 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2795 case L2CAP_CONF_FLUSH_TO
:
2797 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2801 case L2CAP_CONF_RFC
:
2802 if (olen
== sizeof(rfc
))
2803 memcpy(&rfc
, (void *)val
, olen
);
2805 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2806 rfc
.mode
!= pi
->mode
)
2807 return -ECONNREFUSED
;
2811 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2812 sizeof(rfc
), (unsigned long) &rfc
);
2817 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2818 return -ECONNREFUSED
;
2820 pi
->mode
= rfc
.mode
;
2822 if (*result
== L2CAP_CONF_SUCCESS
) {
2824 case L2CAP_MODE_ERTM
:
2825 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2826 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2827 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2829 case L2CAP_MODE_STREAMING
:
2830 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2834 req
->dcid
= cpu_to_le16(pi
->dcid
);
2835 req
->flags
= cpu_to_le16(0x0000);
2840 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2842 struct l2cap_conf_rsp
*rsp
= data
;
2843 void *ptr
= rsp
->data
;
2845 BT_DBG("sk %p", sk
);
2847 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2848 rsp
->result
= cpu_to_le16(result
);
2849 rsp
->flags
= cpu_to_le16(flags
);
2854 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2856 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2859 struct l2cap_conf_rfc rfc
;
2861 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2863 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2866 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2867 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2870 case L2CAP_CONF_RFC
:
2871 if (olen
== sizeof(rfc
))
2872 memcpy(&rfc
, (void *)val
, olen
);
2879 case L2CAP_MODE_ERTM
:
2880 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2881 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2882 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2884 case L2CAP_MODE_STREAMING
:
2885 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2889 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2891 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2893 if (rej
->reason
!= 0x0000)
2896 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2897 cmd
->ident
== conn
->info_ident
) {
2898 del_timer(&conn
->info_timer
);
2900 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2901 conn
->info_ident
= 0;
2903 l2cap_conn_start(conn
);
2909 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2911 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2912 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2913 struct l2cap_conn_rsp rsp
;
2914 struct sock
*parent
, *uninitialized_var(sk
);
2915 int result
, status
= L2CAP_CS_NO_INFO
;
2917 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2918 __le16 psm
= req
->psm
;
2920 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2922 /* Check if we have socket listening on psm */
2923 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2925 result
= L2CAP_CR_BAD_PSM
;
2929 /* Check if the ACL is secure enough (if not SDP) */
2930 if (psm
!= cpu_to_le16(0x0001) &&
2931 !hci_conn_check_link_mode(conn
->hcon
)) {
2932 conn
->disc_reason
= 0x05;
2933 result
= L2CAP_CR_SEC_BLOCK
;
2937 result
= L2CAP_CR_NO_MEM
;
2939 /* Check for backlog size */
2940 if (sk_acceptq_is_full(parent
)) {
2941 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2945 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2949 write_lock_bh(&list
->lock
);
2951 /* Check if we already have channel with that dcid */
2952 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2953 write_unlock_bh(&list
->lock
);
2954 sock_set_flag(sk
, SOCK_ZAPPED
);
2955 l2cap_sock_kill(sk
);
2959 hci_conn_hold(conn
->hcon
);
2961 l2cap_sock_init(sk
, parent
);
2962 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2963 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2964 l2cap_pi(sk
)->psm
= psm
;
2965 l2cap_pi(sk
)->dcid
= scid
;
2967 __l2cap_chan_add(conn
, sk
, parent
);
2968 dcid
= l2cap_pi(sk
)->scid
;
2970 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2972 l2cap_pi(sk
)->ident
= cmd
->ident
;
2974 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2975 if (l2cap_check_security(sk
)) {
2976 if (bt_sk(sk
)->defer_setup
) {
2977 sk
->sk_state
= BT_CONNECT2
;
2978 result
= L2CAP_CR_PEND
;
2979 status
= L2CAP_CS_AUTHOR_PEND
;
2980 parent
->sk_data_ready(parent
, 0);
2982 sk
->sk_state
= BT_CONFIG
;
2983 result
= L2CAP_CR_SUCCESS
;
2984 status
= L2CAP_CS_NO_INFO
;
2987 sk
->sk_state
= BT_CONNECT2
;
2988 result
= L2CAP_CR_PEND
;
2989 status
= L2CAP_CS_AUTHEN_PEND
;
2992 sk
->sk_state
= BT_CONNECT2
;
2993 result
= L2CAP_CR_PEND
;
2994 status
= L2CAP_CS_NO_INFO
;
2997 write_unlock_bh(&list
->lock
);
3000 bh_unlock_sock(parent
);
3003 rsp
.scid
= cpu_to_le16(scid
);
3004 rsp
.dcid
= cpu_to_le16(dcid
);
3005 rsp
.result
= cpu_to_le16(result
);
3006 rsp
.status
= cpu_to_le16(status
);
3007 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3009 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3010 struct l2cap_info_req info
;
3011 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3013 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3014 conn
->info_ident
= l2cap_get_ident(conn
);
3016 mod_timer(&conn
->info_timer
, jiffies
+
3017 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
3019 l2cap_send_cmd(conn
, conn
->info_ident
,
3020 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3023 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
3024 result
== L2CAP_CR_SUCCESS
) {
3026 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3027 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3028 l2cap_build_conf_req(sk
, buf
), buf
);
3029 l2cap_pi(sk
)->num_conf_req
++;
3035 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3037 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3038 u16 scid
, dcid
, result
, status
;
3042 scid
= __le16_to_cpu(rsp
->scid
);
3043 dcid
= __le16_to_cpu(rsp
->dcid
);
3044 result
= __le16_to_cpu(rsp
->result
);
3045 status
= __le16_to_cpu(rsp
->status
);
3047 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
3050 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3054 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
3060 case L2CAP_CR_SUCCESS
:
3061 sk
->sk_state
= BT_CONFIG
;
3062 l2cap_pi(sk
)->ident
= 0;
3063 l2cap_pi(sk
)->dcid
= dcid
;
3064 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
3066 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
3069 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3071 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3072 l2cap_build_conf_req(sk
, req
), req
);
3073 l2cap_pi(sk
)->num_conf_req
++;
3077 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3081 l2cap_chan_del(sk
, ECONNREFUSED
);
3089 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
3091 /* FCS is enabled only in ERTM or streaming mode, if one or both
3094 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
3095 pi
->fcs
= L2CAP_FCS_NONE
;
3096 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
3097 pi
->fcs
= L2CAP_FCS_CRC16
;
3100 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3102 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3108 dcid
= __le16_to_cpu(req
->dcid
);
3109 flags
= __le16_to_cpu(req
->flags
);
3111 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3113 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3117 if (sk
->sk_state
== BT_DISCONN
)
3120 /* Reject if config buffer is too small. */
3121 len
= cmd_len
- sizeof(*req
);
3122 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3123 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3124 l2cap_build_conf_rsp(sk
, rsp
,
3125 L2CAP_CONF_REJECT
, flags
), rsp
);
3130 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3131 l2cap_pi(sk
)->conf_len
+= len
;
3133 if (flags
& 0x0001) {
3134 /* Incomplete config. Send empty response. */
3135 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3136 l2cap_build_conf_rsp(sk
, rsp
,
3137 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3141 /* Complete config. */
3142 len
= l2cap_parse_conf_req(sk
, rsp
);
3144 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3148 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3149 l2cap_pi(sk
)->num_conf_rsp
++;
3151 /* Reset config buffer. */
3152 l2cap_pi(sk
)->conf_len
= 0;
3154 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3157 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3158 set_default_fcs(l2cap_pi(sk
));
3160 sk
->sk_state
= BT_CONNECTED
;
3162 l2cap_pi(sk
)->next_tx_seq
= 0;
3163 l2cap_pi(sk
)->expected_tx_seq
= 0;
3164 __skb_queue_head_init(TX_QUEUE(sk
));
3165 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3166 l2cap_ertm_init(sk
);
3168 l2cap_chan_ready(sk
);
3172 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3174 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3175 l2cap_build_conf_req(sk
, buf
), buf
);
3176 l2cap_pi(sk
)->num_conf_req
++;
3184 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3186 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3187 u16 scid
, flags
, result
;
3189 int len
= cmd
->len
- sizeof(*rsp
);
3191 scid
= __le16_to_cpu(rsp
->scid
);
3192 flags
= __le16_to_cpu(rsp
->flags
);
3193 result
= __le16_to_cpu(rsp
->result
);
3195 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3196 scid
, flags
, result
);
3198 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3203 case L2CAP_CONF_SUCCESS
:
3204 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3207 case L2CAP_CONF_UNACCEPT
:
3208 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3211 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3212 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3216 /* throw out any old stored conf requests */
3217 result
= L2CAP_CONF_SUCCESS
;
3218 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3221 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3225 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3226 L2CAP_CONF_REQ
, len
, req
);
3227 l2cap_pi(sk
)->num_conf_req
++;
3228 if (result
!= L2CAP_CONF_SUCCESS
)
3234 sk
->sk_err
= ECONNRESET
;
3235 l2cap_sock_set_timer(sk
, HZ
* 5);
3236 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3243 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3245 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3246 set_default_fcs(l2cap_pi(sk
));
3248 sk
->sk_state
= BT_CONNECTED
;
3249 l2cap_pi(sk
)->next_tx_seq
= 0;
3250 l2cap_pi(sk
)->expected_tx_seq
= 0;
3251 __skb_queue_head_init(TX_QUEUE(sk
));
3252 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3253 l2cap_ertm_init(sk
);
3255 l2cap_chan_ready(sk
);
3263 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3265 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3266 struct l2cap_disconn_rsp rsp
;
3270 scid
= __le16_to_cpu(req
->scid
);
3271 dcid
= __le16_to_cpu(req
->dcid
);
3273 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3275 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3279 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3280 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3281 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3283 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3285 l2cap_chan_del(sk
, ECONNRESET
);
3288 l2cap_sock_kill(sk
);
3292 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3294 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3298 scid
= __le16_to_cpu(rsp
->scid
);
3299 dcid
= __le16_to_cpu(rsp
->dcid
);
3301 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3303 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3307 l2cap_chan_del(sk
, 0);
3310 l2cap_sock_kill(sk
);
3314 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3316 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3319 type
= __le16_to_cpu(req
->type
);
3321 BT_DBG("type 0x%4.4x", type
);
3323 if (type
== L2CAP_IT_FEAT_MASK
) {
3325 u32 feat_mask
= l2cap_feat_mask
;
3326 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3327 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3328 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3330 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3332 put_unaligned_le32(feat_mask
, rsp
->data
);
3333 l2cap_send_cmd(conn
, cmd
->ident
,
3334 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3335 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3337 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3338 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3339 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3340 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3341 l2cap_send_cmd(conn
, cmd
->ident
,
3342 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3344 struct l2cap_info_rsp rsp
;
3345 rsp
.type
= cpu_to_le16(type
);
3346 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3347 l2cap_send_cmd(conn
, cmd
->ident
,
3348 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3354 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3356 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3359 type
= __le16_to_cpu(rsp
->type
);
3360 result
= __le16_to_cpu(rsp
->result
);
3362 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3364 del_timer(&conn
->info_timer
);
3366 if (result
!= L2CAP_IR_SUCCESS
) {
3367 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3368 conn
->info_ident
= 0;
3370 l2cap_conn_start(conn
);
3375 if (type
== L2CAP_IT_FEAT_MASK
) {
3376 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3378 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3379 struct l2cap_info_req req
;
3380 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3382 conn
->info_ident
= l2cap_get_ident(conn
);
3384 l2cap_send_cmd(conn
, conn
->info_ident
,
3385 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3387 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3388 conn
->info_ident
= 0;
3390 l2cap_conn_start(conn
);
3392 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3393 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3394 conn
->info_ident
= 0;
3396 l2cap_conn_start(conn
);
3402 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3404 u8
*data
= skb
->data
;
3406 struct l2cap_cmd_hdr cmd
;
3409 l2cap_raw_recv(conn
, skb
);
3411 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3413 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3414 data
+= L2CAP_CMD_HDR_SIZE
;
3415 len
-= L2CAP_CMD_HDR_SIZE
;
3417 cmd_len
= le16_to_cpu(cmd
.len
);
3419 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3421 if (cmd_len
> len
|| !cmd
.ident
) {
3422 BT_DBG("corrupted command");
3427 case L2CAP_COMMAND_REJ
:
3428 l2cap_command_rej(conn
, &cmd
, data
);
3431 case L2CAP_CONN_REQ
:
3432 err
= l2cap_connect_req(conn
, &cmd
, data
);
3435 case L2CAP_CONN_RSP
:
3436 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3439 case L2CAP_CONF_REQ
:
3440 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3443 case L2CAP_CONF_RSP
:
3444 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3447 case L2CAP_DISCONN_REQ
:
3448 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3451 case L2CAP_DISCONN_RSP
:
3452 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3455 case L2CAP_ECHO_REQ
:
3456 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3459 case L2CAP_ECHO_RSP
:
3462 case L2CAP_INFO_REQ
:
3463 err
= l2cap_information_req(conn
, &cmd
, data
);
3466 case L2CAP_INFO_RSP
:
3467 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3471 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3477 struct l2cap_cmd_rej rej
;
3478 BT_DBG("error %d", err
);
3480 /* FIXME: Map err to a valid reason */
3481 rej
.reason
= cpu_to_le16(0);
3482 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3492 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3494 u16 our_fcs
, rcv_fcs
;
3495 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3497 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3498 skb_trim(skb
, skb
->len
- 2);
3499 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3500 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3502 if (our_fcs
!= rcv_fcs
)
3508 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3510 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3513 pi
->frames_sent
= 0;
3515 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3517 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3518 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3519 l2cap_send_sframe(pi
, control
);
3520 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3523 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3524 l2cap_retransmit_frames(sk
);
3526 l2cap_ertm_send(sk
);
3528 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3529 pi
->frames_sent
== 0) {
3530 control
|= L2CAP_SUPER_RCV_READY
;
3531 l2cap_send_sframe(pi
, control
);
3535 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3537 struct sk_buff
*next_skb
;
3538 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3539 int tx_seq_offset
, next_tx_seq_offset
;
3541 bt_cb(skb
)->tx_seq
= tx_seq
;
3542 bt_cb(skb
)->sar
= sar
;
3544 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3546 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3550 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3551 if (tx_seq_offset
< 0)
3552 tx_seq_offset
+= 64;
3555 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3558 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3559 pi
->buffer_seq
) % 64;
3560 if (next_tx_seq_offset
< 0)
3561 next_tx_seq_offset
+= 64;
3563 if (next_tx_seq_offset
> tx_seq_offset
) {
3564 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3568 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3571 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3573 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3578 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3580 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3581 struct sk_buff
*_skb
;
3584 switch (control
& L2CAP_CTRL_SAR
) {
3585 case L2CAP_SDU_UNSEGMENTED
:
3586 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3589 err
= sock_queue_rcv_skb(sk
, skb
);
3595 case L2CAP_SDU_START
:
3596 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3599 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3601 if (pi
->sdu_len
> pi
->imtu
)
3604 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3608 /* pull sdu_len bytes only after alloc, because of Local Busy
3609 * condition we have to be sure that this will be executed
3610 * only once, i.e., when alloc does not fail */
3613 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3615 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3616 pi
->partial_sdu_len
= skb
->len
;
3619 case L2CAP_SDU_CONTINUE
:
3620 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3626 pi
->partial_sdu_len
+= skb
->len
;
3627 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3630 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3635 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3641 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3642 pi
->partial_sdu_len
+= skb
->len
;
3644 if (pi
->partial_sdu_len
> pi
->imtu
)
3647 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3650 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3653 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3655 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3659 err
= sock_queue_rcv_skb(sk
, _skb
);
3662 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3666 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3667 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3681 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3686 static int l2cap_try_push_rx_skb(struct sock
*sk
)
3688 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3689 struct sk_buff
*skb
;
3693 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3694 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3695 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3697 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3701 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3704 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3707 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3708 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3709 l2cap_send_sframe(pi
, control
);
3710 l2cap_pi(sk
)->retry_count
= 1;
3712 del_timer(&pi
->retrans_timer
);
3713 __mod_monitor_timer();
3715 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3718 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3719 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3721 BT_DBG("sk %p, Exit local busy", sk
);
3726 static void l2cap_busy_work(struct work_struct
*work
)
3728 DECLARE_WAITQUEUE(wait
, current
);
3729 struct l2cap_pinfo
*pi
=
3730 container_of(work
, struct l2cap_pinfo
, busy_work
);
3731 struct sock
*sk
= (struct sock
*)pi
;
3732 int n_tries
= 0, timeo
= HZ
/5, err
;
3733 struct sk_buff
*skb
;
3737 add_wait_queue(sk_sleep(sk
), &wait
);
3738 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3739 set_current_state(TASK_INTERRUPTIBLE
);
3741 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3743 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3750 if (signal_pending(current
)) {
3751 err
= sock_intr_errno(timeo
);
3756 timeo
= schedule_timeout(timeo
);
3759 err
= sock_error(sk
);
3763 if (l2cap_try_push_rx_skb(sk
) == 0)
3767 set_current_state(TASK_RUNNING
);
3768 remove_wait_queue(sk_sleep(sk
), &wait
);
3773 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3775 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3778 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3779 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3780 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3781 return l2cap_try_push_rx_skb(sk
);
3786 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3788 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3792 /* Busy Condition */
3793 BT_DBG("sk %p, Enter local busy", sk
);
3795 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3796 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3797 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3799 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3800 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3801 l2cap_send_sframe(pi
, sctrl
);
3803 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3805 del_timer(&pi
->ack_timer
);
3807 queue_work(_busy_wq
, &pi
->busy_work
);
3812 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3814 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3815 struct sk_buff
*_skb
;
3819 * TODO: We have to notify the userland if some data is lost with the
3823 switch (control
& L2CAP_CTRL_SAR
) {
3824 case L2CAP_SDU_UNSEGMENTED
:
3825 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3830 err
= sock_queue_rcv_skb(sk
, skb
);
3836 case L2CAP_SDU_START
:
3837 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3842 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3845 if (pi
->sdu_len
> pi
->imtu
) {
3850 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3856 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3858 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3859 pi
->partial_sdu_len
= skb
->len
;
3863 case L2CAP_SDU_CONTINUE
:
3864 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3867 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3869 pi
->partial_sdu_len
+= skb
->len
;
3870 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3878 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3881 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3883 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3884 pi
->partial_sdu_len
+= skb
->len
;
3886 if (pi
->partial_sdu_len
> pi
->imtu
)
3889 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3890 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3891 err
= sock_queue_rcv_skb(sk
, _skb
);
3906 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3908 struct sk_buff
*skb
;
3911 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3912 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3915 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3916 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3917 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3918 l2cap_pi(sk
)->buffer_seq_srej
=
3919 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3920 tx_seq
= (tx_seq
+ 1) % 64;
3924 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3926 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3927 struct srej_list
*l
, *tmp
;
3930 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3931 if (l
->tx_seq
== tx_seq
) {
3936 control
= L2CAP_SUPER_SELECT_REJECT
;
3937 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3938 l2cap_send_sframe(pi
, control
);
3940 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3944 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3946 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3947 struct srej_list
*new;
3950 while (tx_seq
!= pi
->expected_tx_seq
) {
3951 control
= L2CAP_SUPER_SELECT_REJECT
;
3952 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3953 l2cap_send_sframe(pi
, control
);
3955 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3956 new->tx_seq
= pi
->expected_tx_seq
;
3957 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3958 list_add_tail(&new->list
, SREJ_LIST(sk
));
3960 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3963 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3965 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3966 u8 tx_seq
= __get_txseq(rx_control
);
3967 u8 req_seq
= __get_reqseq(rx_control
);
3968 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3969 int tx_seq_offset
, expected_tx_seq_offset
;
3970 int num_to_ack
= (pi
->tx_win
/6) + 1;
3973 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3976 if (L2CAP_CTRL_FINAL
& rx_control
&&
3977 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3978 del_timer(&pi
->monitor_timer
);
3979 if (pi
->unacked_frames
> 0)
3980 __mod_retrans_timer();
3981 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3984 pi
->expected_ack_seq
= req_seq
;
3985 l2cap_drop_acked_frames(sk
);
3987 if (tx_seq
== pi
->expected_tx_seq
)
3990 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3991 if (tx_seq_offset
< 0)
3992 tx_seq_offset
+= 64;
3994 /* invalid tx_seq */
3995 if (tx_seq_offset
>= pi
->tx_win
) {
3996 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4000 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
4003 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4004 struct srej_list
*first
;
4006 first
= list_first_entry(SREJ_LIST(sk
),
4007 struct srej_list
, list
);
4008 if (tx_seq
== first
->tx_seq
) {
4009 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
4010 l2cap_check_srej_gap(sk
, tx_seq
);
4012 list_del(&first
->list
);
4015 if (list_empty(SREJ_LIST(sk
))) {
4016 pi
->buffer_seq
= pi
->buffer_seq_srej
;
4017 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
4019 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
4022 struct srej_list
*l
;
4024 /* duplicated tx_seq */
4025 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
4028 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
4029 if (l
->tx_seq
== tx_seq
) {
4030 l2cap_resend_srejframe(sk
, tx_seq
);
4034 l2cap_send_srejframe(sk
, tx_seq
);
4037 expected_tx_seq_offset
=
4038 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
4039 if (expected_tx_seq_offset
< 0)
4040 expected_tx_seq_offset
+= 64;
4042 /* duplicated tx_seq */
4043 if (tx_seq_offset
< expected_tx_seq_offset
)
4046 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
4048 BT_DBG("sk %p, Enter SREJ", sk
);
4050 INIT_LIST_HEAD(SREJ_LIST(sk
));
4051 pi
->buffer_seq_srej
= pi
->buffer_seq
;
4053 __skb_queue_head_init(SREJ_QUEUE(sk
));
4054 __skb_queue_head_init(BUSY_QUEUE(sk
));
4055 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
4057 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
4059 l2cap_send_srejframe(sk
, tx_seq
);
4061 del_timer(&pi
->ack_timer
);
4066 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4068 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4069 bt_cb(skb
)->tx_seq
= tx_seq
;
4070 bt_cb(skb
)->sar
= sar
;
4071 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
4075 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
4079 if (rx_control
& L2CAP_CTRL_FINAL
) {
4080 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4081 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4083 l2cap_retransmit_frames(sk
);
4088 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
4089 if (pi
->num_acked
== num_to_ack
- 1)
4099 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
4101 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4103 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
4106 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
4107 l2cap_drop_acked_frames(sk
);
4109 if (rx_control
& L2CAP_CTRL_POLL
) {
4110 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4111 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4112 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4113 (pi
->unacked_frames
> 0))
4114 __mod_retrans_timer();
4116 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4117 l2cap_send_srejtail(sk
);
4119 l2cap_send_i_or_rr_or_rnr(sk
);
4122 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4123 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4125 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4126 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4128 l2cap_retransmit_frames(sk
);
4131 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4132 (pi
->unacked_frames
> 0))
4133 __mod_retrans_timer();
4135 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4136 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4139 l2cap_ertm_send(sk
);
4144 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4146 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4147 u8 tx_seq
= __get_reqseq(rx_control
);
4149 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4151 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4153 pi
->expected_ack_seq
= tx_seq
;
4154 l2cap_drop_acked_frames(sk
);
4156 if (rx_control
& L2CAP_CTRL_FINAL
) {
4157 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4158 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4160 l2cap_retransmit_frames(sk
);
4162 l2cap_retransmit_frames(sk
);
4164 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4165 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4168 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4170 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4171 u8 tx_seq
= __get_reqseq(rx_control
);
4173 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4175 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4177 if (rx_control
& L2CAP_CTRL_POLL
) {
4178 pi
->expected_ack_seq
= tx_seq
;
4179 l2cap_drop_acked_frames(sk
);
4181 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4182 l2cap_retransmit_one_frame(sk
, tx_seq
);
4184 l2cap_ertm_send(sk
);
4186 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4187 pi
->srej_save_reqseq
= tx_seq
;
4188 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4190 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4191 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4192 pi
->srej_save_reqseq
== tx_seq
)
4193 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4195 l2cap_retransmit_one_frame(sk
, tx_seq
);
4197 l2cap_retransmit_one_frame(sk
, tx_seq
);
4198 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4199 pi
->srej_save_reqseq
= tx_seq
;
4200 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4205 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4207 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4208 u8 tx_seq
= __get_reqseq(rx_control
);
4210 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4212 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4213 pi
->expected_ack_seq
= tx_seq
;
4214 l2cap_drop_acked_frames(sk
);
4216 if (rx_control
& L2CAP_CTRL_POLL
)
4217 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4219 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4220 del_timer(&pi
->retrans_timer
);
4221 if (rx_control
& L2CAP_CTRL_POLL
)
4222 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4226 if (rx_control
& L2CAP_CTRL_POLL
)
4227 l2cap_send_srejtail(sk
);
4229 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4232 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4234 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4236 if (L2CAP_CTRL_FINAL
& rx_control
&&
4237 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4238 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4239 if (l2cap_pi(sk
)->unacked_frames
> 0)
4240 __mod_retrans_timer();
4241 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4244 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4245 case L2CAP_SUPER_RCV_READY
:
4246 l2cap_data_channel_rrframe(sk
, rx_control
);
4249 case L2CAP_SUPER_REJECT
:
4250 l2cap_data_channel_rejframe(sk
, rx_control
);
4253 case L2CAP_SUPER_SELECT_REJECT
:
4254 l2cap_data_channel_srejframe(sk
, rx_control
);
4257 case L2CAP_SUPER_RCV_NOT_READY
:
4258 l2cap_data_channel_rnrframe(sk
, rx_control
);
4266 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4268 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4271 int len
, next_tx_seq_offset
, req_seq_offset
;
4273 control
= get_unaligned_le16(skb
->data
);
4278 * We can just drop the corrupted I-frame here.
4279 * Receiver will miss it and start proper recovery
4280 * procedures and ask retransmission.
4282 if (l2cap_check_fcs(pi
, skb
))
4285 if (__is_sar_start(control
) && __is_iframe(control
))
4288 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4291 if (len
> pi
->mps
) {
4292 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4296 req_seq
= __get_reqseq(control
);
4297 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4298 if (req_seq_offset
< 0)
4299 req_seq_offset
+= 64;
4301 next_tx_seq_offset
=
4302 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4303 if (next_tx_seq_offset
< 0)
4304 next_tx_seq_offset
+= 64;
4306 /* check for invalid req-seq */
4307 if (req_seq_offset
> next_tx_seq_offset
) {
4308 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4312 if (__is_iframe(control
)) {
4314 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4318 l2cap_data_channel_iframe(sk
, control
, skb
);
4322 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4326 l2cap_data_channel_sframe(sk
, control
, skb
);
4336 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4339 struct l2cap_pinfo
*pi
;
4344 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4346 BT_DBG("unknown cid 0x%4.4x", cid
);
4352 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4354 if (sk
->sk_state
!= BT_CONNECTED
)
4358 case L2CAP_MODE_BASIC
:
4359 /* If socket recv buffers overflows we drop data here
4360 * which is *bad* because L2CAP has to be reliable.
4361 * But we don't have any other choice. L2CAP doesn't
4362 * provide flow control mechanism. */
4364 if (pi
->imtu
< skb
->len
)
4367 if (!sock_queue_rcv_skb(sk
, skb
))
4371 case L2CAP_MODE_ERTM
:
4372 if (!sock_owned_by_user(sk
)) {
4373 l2cap_ertm_data_rcv(sk
, skb
);
4375 if (sk_add_backlog(sk
, skb
))
4381 case L2CAP_MODE_STREAMING
:
4382 control
= get_unaligned_le16(skb
->data
);
4386 if (l2cap_check_fcs(pi
, skb
))
4389 if (__is_sar_start(control
))
4392 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4395 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4398 tx_seq
= __get_txseq(control
);
4400 if (pi
->expected_tx_seq
== tx_seq
)
4401 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4403 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4405 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4410 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4424 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4428 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4432 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4434 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4437 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4440 if (!sock_queue_rcv_skb(sk
, skb
))
4452 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4454 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4458 skb_pull(skb
, L2CAP_HDR_SIZE
);
4459 cid
= __le16_to_cpu(lh
->cid
);
4460 len
= __le16_to_cpu(lh
->len
);
4462 if (len
!= skb
->len
) {
4467 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4470 case L2CAP_CID_SIGNALING
:
4471 l2cap_sig_channel(conn
, skb
);
4474 case L2CAP_CID_CONN_LESS
:
4475 psm
= get_unaligned_le16(skb
->data
);
4477 l2cap_conless_channel(conn
, psm
, skb
);
4481 l2cap_data_channel(conn
, cid
, skb
);
4486 /* ---- L2CAP interface with lower layer (HCI) ---- */
4488 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4490 int exact
= 0, lm1
= 0, lm2
= 0;
4491 register struct sock
*sk
;
4492 struct hlist_node
*node
;
4494 if (type
!= ACL_LINK
)
4497 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4499 /* Find listening sockets and check their link_mode */
4500 read_lock(&l2cap_sk_list
.lock
);
4501 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4502 if (sk
->sk_state
!= BT_LISTEN
)
4505 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4506 lm1
|= HCI_LM_ACCEPT
;
4507 if (l2cap_pi(sk
)->role_switch
)
4508 lm1
|= HCI_LM_MASTER
;
4510 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4511 lm2
|= HCI_LM_ACCEPT
;
4512 if (l2cap_pi(sk
)->role_switch
)
4513 lm2
|= HCI_LM_MASTER
;
4516 read_unlock(&l2cap_sk_list
.lock
);
4518 return exact
? lm1
: lm2
;
4521 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4523 struct l2cap_conn
*conn
;
4525 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4527 if (hcon
->type
!= ACL_LINK
)
4531 conn
= l2cap_conn_add(hcon
, status
);
4533 l2cap_conn_ready(conn
);
4535 l2cap_conn_del(hcon
, bt_err(status
));
4540 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4542 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4544 BT_DBG("hcon %p", hcon
);
4546 if (hcon
->type
!= ACL_LINK
|| !conn
)
4549 return conn
->disc_reason
;
4552 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4554 BT_DBG("hcon %p reason %d", hcon
, reason
);
4556 if (hcon
->type
!= ACL_LINK
)
4559 l2cap_conn_del(hcon
, bt_err(reason
));
4564 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4566 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4569 if (encrypt
== 0x00) {
4570 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4571 l2cap_sock_clear_timer(sk
);
4572 l2cap_sock_set_timer(sk
, HZ
* 5);
4573 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4574 __l2cap_sock_close(sk
, ECONNREFUSED
);
4576 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4577 l2cap_sock_clear_timer(sk
);
4581 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4583 struct l2cap_chan_list
*l
;
4584 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4590 l
= &conn
->chan_list
;
4592 BT_DBG("conn %p", conn
);
4594 read_lock(&l
->lock
);
4596 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4599 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4604 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4605 sk
->sk_state
== BT_CONFIG
)) {
4606 l2cap_check_encryption(sk
, encrypt
);
4611 if (sk
->sk_state
== BT_CONNECT
) {
4613 struct l2cap_conn_req req
;
4614 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4615 req
.psm
= l2cap_pi(sk
)->psm
;
4617 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4618 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4620 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4621 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4623 l2cap_sock_clear_timer(sk
);
4624 l2cap_sock_set_timer(sk
, HZ
/ 10);
4626 } else if (sk
->sk_state
== BT_CONNECT2
) {
4627 struct l2cap_conn_rsp rsp
;
4631 sk
->sk_state
= BT_CONFIG
;
4632 result
= L2CAP_CR_SUCCESS
;
4634 sk
->sk_state
= BT_DISCONN
;
4635 l2cap_sock_set_timer(sk
, HZ
/ 10);
4636 result
= L2CAP_CR_SEC_BLOCK
;
4639 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4640 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4641 rsp
.result
= cpu_to_le16(result
);
4642 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4643 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4644 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4650 read_unlock(&l
->lock
);
4655 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4657 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4659 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4662 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4664 if (flags
& ACL_START
) {
4665 struct l2cap_hdr
*hdr
;
4671 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4672 kfree_skb(conn
->rx_skb
);
4673 conn
->rx_skb
= NULL
;
4675 l2cap_conn_unreliable(conn
, ECOMM
);
4678 /* Start fragment always begin with Basic L2CAP header */
4679 if (skb
->len
< L2CAP_HDR_SIZE
) {
4680 BT_ERR("Frame is too short (len %d)", skb
->len
);
4681 l2cap_conn_unreliable(conn
, ECOMM
);
4685 hdr
= (struct l2cap_hdr
*) skb
->data
;
4686 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4687 cid
= __le16_to_cpu(hdr
->cid
);
4689 if (len
== skb
->len
) {
4690 /* Complete frame received */
4691 l2cap_recv_frame(conn
, skb
);
4695 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4697 if (skb
->len
> len
) {
4698 BT_ERR("Frame is too long (len %d, expected len %d)",
4700 l2cap_conn_unreliable(conn
, ECOMM
);
4704 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4706 if (sk
&& l2cap_pi(sk
)->imtu
< len
- L2CAP_HDR_SIZE
) {
4707 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4708 len
, l2cap_pi(sk
)->imtu
);
4710 l2cap_conn_unreliable(conn
, ECOMM
);
4717 /* Allocate skb for the complete frame (with header) */
4718 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4722 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4724 conn
->rx_len
= len
- skb
->len
;
4726 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4728 if (!conn
->rx_len
) {
4729 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4730 l2cap_conn_unreliable(conn
, ECOMM
);
4734 if (skb
->len
> conn
->rx_len
) {
4735 BT_ERR("Fragment is too long (len %d, expected %d)",
4736 skb
->len
, conn
->rx_len
);
4737 kfree_skb(conn
->rx_skb
);
4738 conn
->rx_skb
= NULL
;
4740 l2cap_conn_unreliable(conn
, ECOMM
);
4744 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4746 conn
->rx_len
-= skb
->len
;
4748 if (!conn
->rx_len
) {
4749 /* Complete frame received */
4750 l2cap_recv_frame(conn
, conn
->rx_skb
);
4751 conn
->rx_skb
= NULL
;
4760 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4763 struct hlist_node
*node
;
4765 read_lock_bh(&l2cap_sk_list
.lock
);
4767 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4768 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4770 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4771 batostr(&bt_sk(sk
)->src
),
4772 batostr(&bt_sk(sk
)->dst
),
4773 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4775 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4778 read_unlock_bh(&l2cap_sk_list
.lock
);
4783 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4785 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4788 static const struct file_operations l2cap_debugfs_fops
= {
4789 .open
= l2cap_debugfs_open
,
4791 .llseek
= seq_lseek
,
4792 .release
= single_release
,
4795 static struct dentry
*l2cap_debugfs
;
4797 static const struct proto_ops l2cap_sock_ops
= {
4798 .family
= PF_BLUETOOTH
,
4799 .owner
= THIS_MODULE
,
4800 .release
= l2cap_sock_release
,
4801 .bind
= l2cap_sock_bind
,
4802 .connect
= l2cap_sock_connect
,
4803 .listen
= l2cap_sock_listen
,
4804 .accept
= l2cap_sock_accept
,
4805 .getname
= l2cap_sock_getname
,
4806 .sendmsg
= l2cap_sock_sendmsg
,
4807 .recvmsg
= l2cap_sock_recvmsg
,
4808 .poll
= bt_sock_poll
,
4809 .ioctl
= bt_sock_ioctl
,
4810 .mmap
= sock_no_mmap
,
4811 .socketpair
= sock_no_socketpair
,
4812 .shutdown
= l2cap_sock_shutdown
,
4813 .setsockopt
= l2cap_sock_setsockopt
,
4814 .getsockopt
= l2cap_sock_getsockopt
4817 static const struct net_proto_family l2cap_sock_family_ops
= {
4818 .family
= PF_BLUETOOTH
,
4819 .owner
= THIS_MODULE
,
4820 .create
= l2cap_sock_create
,
4823 static struct hci_proto l2cap_hci_proto
= {
4825 .id
= HCI_PROTO_L2CAP
,
4826 .connect_ind
= l2cap_connect_ind
,
4827 .connect_cfm
= l2cap_connect_cfm
,
4828 .disconn_ind
= l2cap_disconn_ind
,
4829 .disconn_cfm
= l2cap_disconn_cfm
,
4830 .security_cfm
= l2cap_security_cfm
,
4831 .recv_acldata
= l2cap_recv_acldata
4834 static int __init
l2cap_init(void)
4838 err
= proto_register(&l2cap_proto
, 0);
4842 _busy_wq
= create_singlethread_workqueue("l2cap");
4846 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4848 BT_ERR("L2CAP socket registration failed");
4852 err
= hci_register_proto(&l2cap_hci_proto
);
4854 BT_ERR("L2CAP protocol registration failed");
4855 bt_sock_unregister(BTPROTO_L2CAP
);
4860 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4861 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4863 BT_ERR("Failed to create L2CAP debug file");
4866 BT_INFO("L2CAP ver %s", VERSION
);
4867 BT_INFO("L2CAP socket layer initialized");
4872 proto_unregister(&l2cap_proto
);
4876 static void __exit
l2cap_exit(void)
4878 debugfs_remove(l2cap_debugfs
);
4880 flush_workqueue(_busy_wq
);
4881 destroy_workqueue(_busy_wq
);
4883 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4884 BT_ERR("L2CAP socket unregistration failed");
4886 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4887 BT_ERR("L2CAP protocol unregistration failed");
4889 proto_unregister(&l2cap_proto
);
4892 void l2cap_load(void)
4894 /* Dummy function to trigger automatic L2CAP module loading by
4895 * other modules that use L2CAP sockets but don't use any other
4896 * symbols from it. */
4898 EXPORT_SYMBOL(l2cap_load
);
4900 module_init(l2cap_init
);
4901 module_exit(l2cap_exit
);
4903 module_param(disable_ertm
, bool, 0644);
4904 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");
4906 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4907 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4908 MODULE_VERSION(VERSION
);
4909 MODULE_LICENSE("GPL");
4910 MODULE_ALIAS("bt-proto-0");