2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { 0x02, };
65 static struct workqueue_struct
*_busy_wq
;
67 struct bt_sock_list l2cap_sk_list
= {
68 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
71 static void l2cap_busy_work(struct work_struct
*work
);
73 static void l2cap_sock_close(struct sock
*sk
);
75 static int l2cap_build_conf_req(struct sock
*sk
, void *data
);
76 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
77 u8 code
, u8 ident
, u16 dlen
, void *data
);
79 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
81 /* ---- L2CAP timers ---- */
82 void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
84 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
85 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
88 static void l2cap_sock_clear_timer(struct sock
*sk
)
90 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
91 sk_stop_timer(sk
, &sk
->sk_timer
);
94 /* ---- L2CAP channels ---- */
95 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
98 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
99 if (l2cap_pi(s
)->dcid
== cid
)
105 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
108 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
109 if (l2cap_pi(s
)->scid
== cid
)
115 /* Find channel with given SCID.
116 * Returns locked socket */
117 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
121 s
= __l2cap_get_chan_by_scid(l
, cid
);
124 read_unlock(&l
->lock
);
128 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
131 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
132 if (l2cap_pi(s
)->ident
== ident
)
138 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
142 s
= __l2cap_get_chan_by_ident(l
, ident
);
145 read_unlock(&l
->lock
);
149 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
151 u16 cid
= L2CAP_CID_DYN_START
;
153 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
154 if (!__l2cap_get_chan_by_scid(l
, cid
))
161 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
166 l2cap_pi(l
->head
)->prev_c
= sk
;
168 l2cap_pi(sk
)->next_c
= l
->head
;
169 l2cap_pi(sk
)->prev_c
= NULL
;
173 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
175 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
177 write_lock_bh(&l
->lock
);
182 l2cap_pi(next
)->prev_c
= prev
;
184 l2cap_pi(prev
)->next_c
= next
;
185 write_unlock_bh(&l
->lock
);
190 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
192 struct l2cap_chan_list
*l
= &conn
->chan_list
;
194 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
195 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
197 conn
->disc_reason
= 0x13;
199 l2cap_pi(sk
)->conn
= conn
;
201 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
202 /* Alloc CID for connection-oriented socket */
203 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
204 } else if (sk
->sk_type
== SOCK_DGRAM
) {
205 /* Connectionless socket */
206 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
207 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
208 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
210 /* Raw socket can send/recv signalling messages only */
211 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
212 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
213 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
216 __l2cap_chan_link(l
, sk
);
219 bt_accept_enqueue(parent
, sk
);
223 * Must be called on the locked socket. */
224 static void l2cap_chan_del(struct sock
*sk
, int err
)
226 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
227 struct sock
*parent
= bt_sk(sk
)->parent
;
229 l2cap_sock_clear_timer(sk
);
231 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
234 /* Unlink from channel list */
235 l2cap_chan_unlink(&conn
->chan_list
, sk
);
236 l2cap_pi(sk
)->conn
= NULL
;
237 hci_conn_put(conn
->hcon
);
240 sk
->sk_state
= BT_CLOSED
;
241 sock_set_flag(sk
, SOCK_ZAPPED
);
247 bt_accept_unlink(sk
);
248 parent
->sk_data_ready(parent
, 0);
250 sk
->sk_state_change(sk
);
252 skb_queue_purge(TX_QUEUE(sk
));
254 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
255 struct srej_list
*l
, *tmp
;
257 del_timer(&l2cap_pi(sk
)->retrans_timer
);
258 del_timer(&l2cap_pi(sk
)->monitor_timer
);
259 del_timer(&l2cap_pi(sk
)->ack_timer
);
261 skb_queue_purge(SREJ_QUEUE(sk
));
262 skb_queue_purge(BUSY_QUEUE(sk
));
264 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
271 static inline u8
l2cap_get_auth_type(struct sock
*sk
)
273 if (sk
->sk_type
== SOCK_RAW
) {
274 switch (l2cap_pi(sk
)->sec_level
) {
275 case BT_SECURITY_HIGH
:
276 return HCI_AT_DEDICATED_BONDING_MITM
;
277 case BT_SECURITY_MEDIUM
:
278 return HCI_AT_DEDICATED_BONDING
;
280 return HCI_AT_NO_BONDING
;
282 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
283 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
284 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
286 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
287 return HCI_AT_NO_BONDING_MITM
;
289 return HCI_AT_NO_BONDING
;
291 switch (l2cap_pi(sk
)->sec_level
) {
292 case BT_SECURITY_HIGH
:
293 return HCI_AT_GENERAL_BONDING_MITM
;
294 case BT_SECURITY_MEDIUM
:
295 return HCI_AT_GENERAL_BONDING
;
297 return HCI_AT_NO_BONDING
;
302 /* Service level security */
303 static inline int l2cap_check_security(struct sock
*sk
)
305 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
308 auth_type
= l2cap_get_auth_type(sk
);
310 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
314 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
318 /* Get next available identificator.
319 * 1 - 128 are used by kernel.
320 * 129 - 199 are reserved.
321 * 200 - 254 are used by utilities like l2ping, etc.
324 spin_lock_bh(&conn
->lock
);
326 if (++conn
->tx_ident
> 128)
331 spin_unlock_bh(&conn
->lock
);
336 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
338 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
341 BT_DBG("code 0x%2.2x", code
);
346 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
347 flags
= ACL_START_NO_FLUSH
;
351 hci_send_acl(conn
->hcon
, skb
, flags
);
354 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
357 struct l2cap_hdr
*lh
;
358 struct l2cap_conn
*conn
= pi
->conn
;
359 struct sock
*sk
= (struct sock
*)pi
;
360 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
363 if (sk
->sk_state
!= BT_CONNECTED
)
366 if (pi
->fcs
== L2CAP_FCS_CRC16
)
369 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
371 count
= min_t(unsigned int, conn
->mtu
, hlen
);
372 control
|= L2CAP_CTRL_FRAME_TYPE
;
374 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
375 control
|= L2CAP_CTRL_FINAL
;
376 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
379 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
380 control
|= L2CAP_CTRL_POLL
;
381 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
384 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
388 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
389 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
390 lh
->cid
= cpu_to_le16(pi
->dcid
);
391 put_unaligned_le16(control
, skb_put(skb
, 2));
393 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
394 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
395 put_unaligned_le16(fcs
, skb_put(skb
, 2));
398 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
399 flags
= ACL_START_NO_FLUSH
;
403 hci_send_acl(pi
->conn
->hcon
, skb
, flags
);
406 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
408 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
409 control
|= L2CAP_SUPER_RCV_NOT_READY
;
410 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
412 control
|= L2CAP_SUPER_RCV_READY
;
414 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
416 l2cap_send_sframe(pi
, control
);
419 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
421 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
424 static void l2cap_do_start(struct sock
*sk
)
426 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
428 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
429 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
432 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
433 struct l2cap_conn_req req
;
434 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
435 req
.psm
= l2cap_pi(sk
)->psm
;
437 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
438 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
440 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
441 L2CAP_CONN_REQ
, sizeof(req
), &req
);
444 struct l2cap_info_req req
;
445 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
447 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
448 conn
->info_ident
= l2cap_get_ident(conn
);
450 mod_timer(&conn
->info_timer
, jiffies
+
451 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
453 l2cap_send_cmd(conn
, conn
->info_ident
,
454 L2CAP_INFO_REQ
, sizeof(req
), &req
);
458 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
460 u32 local_feat_mask
= l2cap_feat_mask
;
462 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
465 case L2CAP_MODE_ERTM
:
466 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
467 case L2CAP_MODE_STREAMING
:
468 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
474 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
476 struct l2cap_disconn_req req
;
481 skb_queue_purge(TX_QUEUE(sk
));
483 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
484 del_timer(&l2cap_pi(sk
)->retrans_timer
);
485 del_timer(&l2cap_pi(sk
)->monitor_timer
);
486 del_timer(&l2cap_pi(sk
)->ack_timer
);
489 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
490 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
491 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
492 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
494 sk
->sk_state
= BT_DISCONN
;
498 /* ---- L2CAP connections ---- */
499 static void l2cap_conn_start(struct l2cap_conn
*conn
)
501 struct l2cap_chan_list
*l
= &conn
->chan_list
;
502 struct sock_del_list del
, *tmp1
, *tmp2
;
505 BT_DBG("conn %p", conn
);
507 INIT_LIST_HEAD(&del
.list
);
511 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
514 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
515 sk
->sk_type
!= SOCK_STREAM
) {
520 if (sk
->sk_state
== BT_CONNECT
) {
521 struct l2cap_conn_req req
;
523 if (!l2cap_check_security(sk
) ||
524 !__l2cap_no_conn_pending(sk
)) {
529 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
531 && l2cap_pi(sk
)->conf_state
&
532 L2CAP_CONF_STATE2_DEVICE
) {
533 tmp1
= kzalloc(sizeof(struct sock_del_list
),
536 list_add_tail(&tmp1
->list
, &del
.list
);
541 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
542 req
.psm
= l2cap_pi(sk
)->psm
;
544 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
545 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
547 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
548 L2CAP_CONN_REQ
, sizeof(req
), &req
);
550 } else if (sk
->sk_state
== BT_CONNECT2
) {
551 struct l2cap_conn_rsp rsp
;
553 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
554 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
556 if (l2cap_check_security(sk
)) {
557 if (bt_sk(sk
)->defer_setup
) {
558 struct sock
*parent
= bt_sk(sk
)->parent
;
559 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
560 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
561 parent
->sk_data_ready(parent
, 0);
564 sk
->sk_state
= BT_CONFIG
;
565 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
566 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
569 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
570 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
573 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
574 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
576 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
577 rsp
.result
!= L2CAP_CR_SUCCESS
) {
582 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
583 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
584 l2cap_build_conf_req(sk
, buf
), buf
);
585 l2cap_pi(sk
)->num_conf_req
++;
591 read_unlock(&l
->lock
);
593 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
594 bh_lock_sock(tmp1
->sk
);
595 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
596 bh_unlock_sock(tmp1
->sk
);
597 list_del(&tmp1
->list
);
602 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
604 struct l2cap_chan_list
*l
= &conn
->chan_list
;
607 BT_DBG("conn %p", conn
);
611 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
614 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
615 sk
->sk_type
!= SOCK_STREAM
) {
616 l2cap_sock_clear_timer(sk
);
617 sk
->sk_state
= BT_CONNECTED
;
618 sk
->sk_state_change(sk
);
619 } else if (sk
->sk_state
== BT_CONNECT
)
625 read_unlock(&l
->lock
);
628 /* Notify sockets that we cannot guaranty reliability anymore */
629 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
631 struct l2cap_chan_list
*l
= &conn
->chan_list
;
634 BT_DBG("conn %p", conn
);
638 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
639 if (l2cap_pi(sk
)->force_reliable
)
643 read_unlock(&l
->lock
);
646 static void l2cap_info_timeout(unsigned long arg
)
648 struct l2cap_conn
*conn
= (void *) arg
;
650 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
651 conn
->info_ident
= 0;
653 l2cap_conn_start(conn
);
656 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
658 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
663 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
667 hcon
->l2cap_data
= conn
;
670 BT_DBG("hcon %p conn %p", hcon
, conn
);
672 conn
->mtu
= hcon
->hdev
->acl_mtu
;
673 conn
->src
= &hcon
->hdev
->bdaddr
;
674 conn
->dst
= &hcon
->dst
;
678 spin_lock_init(&conn
->lock
);
679 rwlock_init(&conn
->chan_list
.lock
);
681 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
682 (unsigned long) conn
);
684 conn
->disc_reason
= 0x13;
689 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
691 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
697 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
699 kfree_skb(conn
->rx_skb
);
702 while ((sk
= conn
->chan_list
.head
)) {
704 l2cap_chan_del(sk
, err
);
709 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
710 del_timer_sync(&conn
->info_timer
);
712 hcon
->l2cap_data
= NULL
;
716 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
718 struct l2cap_chan_list
*l
= &conn
->chan_list
;
719 write_lock_bh(&l
->lock
);
720 __l2cap_chan_add(conn
, sk
, parent
);
721 write_unlock_bh(&l
->lock
);
724 /* ---- Socket interface ---- */
726 /* Find socket with psm and source bdaddr.
727 * Returns closest match.
729 static struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
731 struct sock
*sk
= NULL
, *sk1
= NULL
;
732 struct hlist_node
*node
;
734 read_lock(&l2cap_sk_list
.lock
);
736 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
737 if (state
&& sk
->sk_state
!= state
)
740 if (l2cap_pi(sk
)->psm
== psm
) {
742 if (!bacmp(&bt_sk(sk
)->src
, src
))
746 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
751 read_unlock(&l2cap_sk_list
.lock
);
753 return node
? sk
: sk1
;
756 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
760 BT_DBG("parent %p", parent
);
762 /* Close not yet accepted channels */
763 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
764 l2cap_sock_close(sk
);
766 parent
->sk_state
= BT_CLOSED
;
767 sock_set_flag(parent
, SOCK_ZAPPED
);
770 /* Kill socket (only if zapped and orphan)
771 * Must be called on unlocked socket.
773 void l2cap_sock_kill(struct sock
*sk
)
775 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
778 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
780 /* Kill poor orphan */
781 bt_sock_unlink(&l2cap_sk_list
, sk
);
782 sock_set_flag(sk
, SOCK_DEAD
);
786 void __l2cap_sock_close(struct sock
*sk
, int reason
)
788 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
790 switch (sk
->sk_state
) {
792 l2cap_sock_cleanup_listen(sk
);
797 if (sk
->sk_type
== SOCK_SEQPACKET
||
798 sk
->sk_type
== SOCK_STREAM
) {
799 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
801 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
802 l2cap_send_disconn_req(conn
, sk
, reason
);
804 l2cap_chan_del(sk
, reason
);
808 if (sk
->sk_type
== SOCK_SEQPACKET
||
809 sk
->sk_type
== SOCK_STREAM
) {
810 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
811 struct l2cap_conn_rsp rsp
;
814 if (bt_sk(sk
)->defer_setup
)
815 result
= L2CAP_CR_SEC_BLOCK
;
817 result
= L2CAP_CR_BAD_PSM
;
818 sk
->sk_state
= BT_DISCONN
;
820 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
821 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
822 rsp
.result
= cpu_to_le16(result
);
823 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
824 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
825 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
827 l2cap_chan_del(sk
, reason
);
832 l2cap_chan_del(sk
, reason
);
836 sock_set_flag(sk
, SOCK_ZAPPED
);
841 /* Must be called on unlocked socket. */
842 static void l2cap_sock_close(struct sock
*sk
)
844 l2cap_sock_clear_timer(sk
);
846 __l2cap_sock_close(sk
, ECONNRESET
);
851 static int l2cap_do_connect(struct sock
*sk
)
853 bdaddr_t
*src
= &bt_sk(sk
)->src
;
854 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
855 struct l2cap_conn
*conn
;
856 struct hci_conn
*hcon
;
857 struct hci_dev
*hdev
;
861 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
864 hdev
= hci_get_route(dst
, src
);
866 return -EHOSTUNREACH
;
868 hci_dev_lock_bh(hdev
);
872 auth_type
= l2cap_get_auth_type(sk
);
874 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
875 l2cap_pi(sk
)->sec_level
, auth_type
);
879 conn
= l2cap_conn_add(hcon
, 0);
887 /* Update source addr of the socket */
888 bacpy(src
, conn
->src
);
890 l2cap_chan_add(conn
, sk
, NULL
);
892 sk
->sk_state
= BT_CONNECT
;
893 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
895 if (hcon
->state
== BT_CONNECTED
) {
896 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
897 sk
->sk_type
!= SOCK_STREAM
) {
898 l2cap_sock_clear_timer(sk
);
899 if (l2cap_check_security(sk
))
900 sk
->sk_state
= BT_CONNECTED
;
906 hci_dev_unlock_bh(hdev
);
911 int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
913 struct sock
*sk
= sock
->sk
;
914 struct sockaddr_l2 la
;
919 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
920 addr
->sa_family
!= AF_BLUETOOTH
)
923 memset(&la
, 0, sizeof(la
));
924 len
= min_t(unsigned int, sizeof(la
), alen
);
925 memcpy(&la
, addr
, len
);
932 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
938 switch (l2cap_pi(sk
)->mode
) {
939 case L2CAP_MODE_BASIC
:
941 case L2CAP_MODE_ERTM
:
942 case L2CAP_MODE_STREAMING
:
951 switch (sk
->sk_state
) {
955 /* Already connecting */
959 /* Already connected */
973 /* PSM must be odd and lsb of upper byte must be 0 */
974 if ((__le16_to_cpu(la
.l2_psm
) & 0x0101) != 0x0001 &&
975 sk
->sk_type
!= SOCK_RAW
) {
980 /* Set destination address and psm */
981 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
982 l2cap_pi(sk
)->psm
= la
.l2_psm
;
984 err
= l2cap_do_connect(sk
);
989 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
990 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
996 int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
998 DECLARE_WAITQUEUE(wait
, current
);
999 struct sock
*sk
= sock
->sk
, *nsk
;
1003 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1005 if (sk
->sk_state
!= BT_LISTEN
) {
1010 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1012 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1014 /* Wait for an incoming connection. (wake-one). */
1015 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1016 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1017 set_current_state(TASK_INTERRUPTIBLE
);
1024 timeo
= schedule_timeout(timeo
);
1025 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1027 if (sk
->sk_state
!= BT_LISTEN
) {
1032 if (signal_pending(current
)) {
1033 err
= sock_intr_errno(timeo
);
1037 set_current_state(TASK_RUNNING
);
1038 remove_wait_queue(sk_sleep(sk
), &wait
);
1043 newsock
->state
= SS_CONNECTED
;
1045 BT_DBG("new socket %p", nsk
);
1052 int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1054 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1055 struct sock
*sk
= sock
->sk
;
1057 BT_DBG("sock %p, sk %p", sock
, sk
);
1059 addr
->sa_family
= AF_BLUETOOTH
;
1060 *len
= sizeof(struct sockaddr_l2
);
1063 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1064 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1065 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1067 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1068 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1069 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1075 static int __l2cap_wait_ack(struct sock
*sk
)
1077 DECLARE_WAITQUEUE(wait
, current
);
1081 add_wait_queue(sk_sleep(sk
), &wait
);
1082 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1083 set_current_state(TASK_INTERRUPTIBLE
);
1088 if (signal_pending(current
)) {
1089 err
= sock_intr_errno(timeo
);
1094 timeo
= schedule_timeout(timeo
);
1097 err
= sock_error(sk
);
1101 set_current_state(TASK_RUNNING
);
1102 remove_wait_queue(sk_sleep(sk
), &wait
);
1106 static void l2cap_monitor_timeout(unsigned long arg
)
1108 struct sock
*sk
= (void *) arg
;
1110 BT_DBG("sk %p", sk
);
1113 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1114 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1119 l2cap_pi(sk
)->retry_count
++;
1120 __mod_monitor_timer();
1122 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1126 static void l2cap_retrans_timeout(unsigned long arg
)
1128 struct sock
*sk
= (void *) arg
;
1130 BT_DBG("sk %p", sk
);
1133 l2cap_pi(sk
)->retry_count
= 1;
1134 __mod_monitor_timer();
1136 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1138 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1142 static void l2cap_drop_acked_frames(struct sock
*sk
)
1144 struct sk_buff
*skb
;
1146 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1147 l2cap_pi(sk
)->unacked_frames
) {
1148 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1151 skb
= skb_dequeue(TX_QUEUE(sk
));
1154 l2cap_pi(sk
)->unacked_frames
--;
1157 if (!l2cap_pi(sk
)->unacked_frames
)
1158 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1161 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1163 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1164 struct hci_conn
*hcon
= pi
->conn
->hcon
;
1167 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1169 if (!pi
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
1170 flags
= ACL_START_NO_FLUSH
;
1174 hci_send_acl(hcon
, skb
, flags
);
1177 static void l2cap_streaming_send(struct sock
*sk
)
1179 struct sk_buff
*skb
;
1180 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1183 while ((skb
= skb_dequeue(TX_QUEUE(sk
)))) {
1184 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1185 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1186 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1188 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1189 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1190 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1193 l2cap_do_send(sk
, skb
);
1195 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1199 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1201 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1202 struct sk_buff
*skb
, *tx_skb
;
1205 skb
= skb_peek(TX_QUEUE(sk
));
1210 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1213 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1216 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1218 if (pi
->remote_max_tx
&&
1219 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1220 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1224 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1225 bt_cb(skb
)->retries
++;
1226 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1228 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1229 control
|= L2CAP_CTRL_FINAL
;
1230 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1233 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1234 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1236 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1238 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1239 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1240 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1243 l2cap_do_send(sk
, tx_skb
);
1246 static int l2cap_ertm_send(struct sock
*sk
)
1248 struct sk_buff
*skb
, *tx_skb
;
1249 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1253 if (sk
->sk_state
!= BT_CONNECTED
)
1256 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1258 if (pi
->remote_max_tx
&&
1259 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1260 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1264 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1266 bt_cb(skb
)->retries
++;
1268 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1269 control
&= L2CAP_CTRL_SAR
;
1271 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1272 control
|= L2CAP_CTRL_FINAL
;
1273 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1275 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1276 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1277 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1280 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1281 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1282 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1285 l2cap_do_send(sk
, tx_skb
);
1287 __mod_retrans_timer();
1289 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1290 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1292 pi
->unacked_frames
++;
1295 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1296 sk
->sk_send_head
= NULL
;
1298 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1306 static int l2cap_retransmit_frames(struct sock
*sk
)
1308 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1311 if (!skb_queue_empty(TX_QUEUE(sk
)))
1312 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1314 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1315 ret
= l2cap_ertm_send(sk
);
1319 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1321 struct sock
*sk
= (struct sock
*)pi
;
1324 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1326 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1327 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1328 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1329 l2cap_send_sframe(pi
, control
);
1333 if (l2cap_ertm_send(sk
) > 0)
1336 control
|= L2CAP_SUPER_RCV_READY
;
1337 l2cap_send_sframe(pi
, control
);
1340 static void l2cap_send_srejtail(struct sock
*sk
)
1342 struct srej_list
*tail
;
1345 control
= L2CAP_SUPER_SELECT_REJECT
;
1346 control
|= L2CAP_CTRL_FINAL
;
1348 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1349 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1351 l2cap_send_sframe(l2cap_pi(sk
), control
);
1354 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1356 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1357 struct sk_buff
**frag
;
1360 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1366 /* Continuation fragments (no L2CAP header) */
1367 frag
= &skb_shinfo(skb
)->frag_list
;
1369 count
= min_t(unsigned int, conn
->mtu
, len
);
1371 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1374 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1380 frag
= &(*frag
)->next
;
1386 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1388 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1389 struct sk_buff
*skb
;
1390 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1391 struct l2cap_hdr
*lh
;
1393 BT_DBG("sk %p len %d", sk
, (int)len
);
1395 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1396 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1397 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1399 return ERR_PTR(err
);
1401 /* Create L2CAP header */
1402 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1403 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1404 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1405 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1407 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1408 if (unlikely(err
< 0)) {
1410 return ERR_PTR(err
);
1415 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1417 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1418 struct sk_buff
*skb
;
1419 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1420 struct l2cap_hdr
*lh
;
1422 BT_DBG("sk %p len %d", sk
, (int)len
);
1424 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1425 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1426 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1428 return ERR_PTR(err
);
1430 /* Create L2CAP header */
1431 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1432 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1433 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1435 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1436 if (unlikely(err
< 0)) {
1438 return ERR_PTR(err
);
1443 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1445 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1446 struct sk_buff
*skb
;
1447 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1448 struct l2cap_hdr
*lh
;
1450 BT_DBG("sk %p len %d", sk
, (int)len
);
1453 return ERR_PTR(-ENOTCONN
);
1458 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1461 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1462 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1463 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1465 return ERR_PTR(err
);
1467 /* Create L2CAP header */
1468 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1469 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1470 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1471 put_unaligned_le16(control
, skb_put(skb
, 2));
1473 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1475 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1476 if (unlikely(err
< 0)) {
1478 return ERR_PTR(err
);
1481 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1482 put_unaligned_le16(0, skb_put(skb
, 2));
1484 bt_cb(skb
)->retries
= 0;
1488 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1490 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1491 struct sk_buff
*skb
;
1492 struct sk_buff_head sar_queue
;
1496 skb_queue_head_init(&sar_queue
);
1497 control
= L2CAP_SDU_START
;
1498 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1500 return PTR_ERR(skb
);
1502 __skb_queue_tail(&sar_queue
, skb
);
1503 len
-= pi
->remote_mps
;
1504 size
+= pi
->remote_mps
;
1509 if (len
> pi
->remote_mps
) {
1510 control
= L2CAP_SDU_CONTINUE
;
1511 buflen
= pi
->remote_mps
;
1513 control
= L2CAP_SDU_END
;
1517 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1519 skb_queue_purge(&sar_queue
);
1520 return PTR_ERR(skb
);
1523 __skb_queue_tail(&sar_queue
, skb
);
1527 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1528 if (sk
->sk_send_head
== NULL
)
1529 sk
->sk_send_head
= sar_queue
.next
;
1534 int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1536 struct sock
*sk
= sock
->sk
;
1537 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1538 struct sk_buff
*skb
;
1542 BT_DBG("sock %p, sk %p", sock
, sk
);
1544 err
= sock_error(sk
);
1548 if (msg
->msg_flags
& MSG_OOB
)
1553 if (sk
->sk_state
!= BT_CONNECTED
) {
1558 /* Connectionless channel */
1559 if (sk
->sk_type
== SOCK_DGRAM
) {
1560 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1564 l2cap_do_send(sk
, skb
);
1571 case L2CAP_MODE_BASIC
:
1572 /* Check outgoing MTU */
1573 if (len
> pi
->omtu
) {
1578 /* Create a basic PDU */
1579 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1585 l2cap_do_send(sk
, skb
);
1589 case L2CAP_MODE_ERTM
:
1590 case L2CAP_MODE_STREAMING
:
1591 /* Entire SDU fits into one PDU */
1592 if (len
<= pi
->remote_mps
) {
1593 control
= L2CAP_SDU_UNSEGMENTED
;
1594 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1599 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1601 if (sk
->sk_send_head
== NULL
)
1602 sk
->sk_send_head
= skb
;
1605 /* Segment SDU into multiples PDUs */
1606 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1611 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1612 l2cap_streaming_send(sk
);
1614 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
1615 (pi
->conn_state
& L2CAP_CONN_WAIT_F
)) {
1619 err
= l2cap_ertm_send(sk
);
1627 BT_DBG("bad state %1.1x", pi
->mode
);
1636 int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1638 struct sock
*sk
= sock
->sk
;
1642 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1643 struct l2cap_conn_rsp rsp
;
1644 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1647 sk
->sk_state
= BT_CONFIG
;
1649 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1650 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1651 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1652 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1653 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1654 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1656 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) {
1661 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1662 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1663 l2cap_build_conf_req(sk
, buf
), buf
);
1664 l2cap_pi(sk
)->num_conf_req
++;
1672 if (sock
->type
== SOCK_STREAM
)
1673 return bt_sock_stream_recvmsg(iocb
, sock
, msg
, len
, flags
);
1675 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1678 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1680 struct sock
*sk
= sock
->sk
;
1681 struct l2cap_options opts
;
1685 BT_DBG("sk %p", sk
);
1691 if (sk
->sk_state
== BT_CONNECTED
) {
1696 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1697 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1698 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1699 opts
.mode
= l2cap_pi(sk
)->mode
;
1700 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1701 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1702 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1704 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1705 if (copy_from_user((char *) &opts
, optval
, len
)) {
1710 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1715 l2cap_pi(sk
)->mode
= opts
.mode
;
1716 switch (l2cap_pi(sk
)->mode
) {
1717 case L2CAP_MODE_BASIC
:
1718 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
1720 case L2CAP_MODE_ERTM
:
1721 case L2CAP_MODE_STREAMING
:
1730 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1731 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1732 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1733 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1734 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1738 if (get_user(opt
, (u32 __user
*) optval
)) {
1743 if (opt
& L2CAP_LM_AUTH
)
1744 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1745 if (opt
& L2CAP_LM_ENCRYPT
)
1746 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1747 if (opt
& L2CAP_LM_SECURE
)
1748 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1750 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1751 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1763 int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1765 struct sock
*sk
= sock
->sk
;
1766 struct bt_security sec
;
1770 BT_DBG("sk %p", sk
);
1772 if (level
== SOL_L2CAP
)
1773 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1775 if (level
!= SOL_BLUETOOTH
)
1776 return -ENOPROTOOPT
;
1782 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
1783 && sk
->sk_type
!= SOCK_RAW
) {
1788 sec
.level
= BT_SECURITY_LOW
;
1790 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1791 if (copy_from_user((char *) &sec
, optval
, len
)) {
1796 if (sec
.level
< BT_SECURITY_LOW
||
1797 sec
.level
> BT_SECURITY_HIGH
) {
1802 l2cap_pi(sk
)->sec_level
= sec
.level
;
1805 case BT_DEFER_SETUP
:
1806 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1811 if (get_user(opt
, (u32 __user
*) optval
)) {
1816 bt_sk(sk
)->defer_setup
= opt
;
1820 if (get_user(opt
, (u32 __user
*) optval
)) {
1825 if (opt
> BT_FLUSHABLE_ON
) {
1830 if (opt
== BT_FLUSHABLE_OFF
) {
1831 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1832 /* proceed futher only when we have l2cap_conn and
1833 No Flush support in the LM */
1834 if (!conn
|| !lmp_no_flush_capable(conn
->hcon
->hdev
)) {
1840 l2cap_pi(sk
)->flushable
= opt
;
1852 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
1854 struct sock
*sk
= sock
->sk
;
1855 struct l2cap_options opts
;
1856 struct l2cap_conninfo cinfo
;
1860 BT_DBG("sk %p", sk
);
1862 if (get_user(len
, optlen
))
1869 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1870 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1871 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1872 opts
.mode
= l2cap_pi(sk
)->mode
;
1873 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1874 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1875 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1877 len
= min_t(unsigned int, len
, sizeof(opts
));
1878 if (copy_to_user(optval
, (char *) &opts
, len
))
1884 switch (l2cap_pi(sk
)->sec_level
) {
1885 case BT_SECURITY_LOW
:
1886 opt
= L2CAP_LM_AUTH
;
1888 case BT_SECURITY_MEDIUM
:
1889 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
1891 case BT_SECURITY_HIGH
:
1892 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
1900 if (l2cap_pi(sk
)->role_switch
)
1901 opt
|= L2CAP_LM_MASTER
;
1903 if (l2cap_pi(sk
)->force_reliable
)
1904 opt
|= L2CAP_LM_RELIABLE
;
1906 if (put_user(opt
, (u32 __user
*) optval
))
1910 case L2CAP_CONNINFO
:
1911 if (sk
->sk_state
!= BT_CONNECTED
&&
1912 !(sk
->sk_state
== BT_CONNECT2
&&
1913 bt_sk(sk
)->defer_setup
)) {
1918 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
1919 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
1921 len
= min_t(unsigned int, len
, sizeof(cinfo
));
1922 if (copy_to_user(optval
, (char *) &cinfo
, len
))
1936 int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
1938 struct sock
*sk
= sock
->sk
;
1939 struct bt_security sec
;
1942 BT_DBG("sk %p", sk
);
1944 if (level
== SOL_L2CAP
)
1945 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
1947 if (level
!= SOL_BLUETOOTH
)
1948 return -ENOPROTOOPT
;
1950 if (get_user(len
, optlen
))
1957 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
1958 && sk
->sk_type
!= SOCK_RAW
) {
1963 sec
.level
= l2cap_pi(sk
)->sec_level
;
1965 len
= min_t(unsigned int, len
, sizeof(sec
));
1966 if (copy_to_user(optval
, (char *) &sec
, len
))
1971 case BT_DEFER_SETUP
:
1972 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
1977 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
1983 if (put_user(l2cap_pi(sk
)->flushable
, (u32 __user
*) optval
))
1997 int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1999 struct sock
*sk
= sock
->sk
;
2002 BT_DBG("sock %p, sk %p", sock
, sk
);
2008 if (!sk
->sk_shutdown
) {
2009 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2010 err
= __l2cap_wait_ack(sk
);
2012 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2013 l2cap_sock_clear_timer(sk
);
2014 __l2cap_sock_close(sk
, 0);
2016 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2017 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2021 if (!err
&& sk
->sk_err
)
2028 static void l2cap_chan_ready(struct sock
*sk
)
2030 struct sock
*parent
= bt_sk(sk
)->parent
;
2032 BT_DBG("sk %p, parent %p", sk
, parent
);
2034 l2cap_pi(sk
)->conf_state
= 0;
2035 l2cap_sock_clear_timer(sk
);
2038 /* Outgoing channel.
2039 * Wake up socket sleeping on connect.
2041 sk
->sk_state
= BT_CONNECTED
;
2042 sk
->sk_state_change(sk
);
2044 /* Incoming channel.
2045 * Wake up socket sleeping on accept.
2047 parent
->sk_data_ready(parent
, 0);
2051 /* Copy frame to all raw sockets on that connection */
2052 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2054 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2055 struct sk_buff
*nskb
;
2058 BT_DBG("conn %p", conn
);
2060 read_lock(&l
->lock
);
2061 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2062 if (sk
->sk_type
!= SOCK_RAW
)
2065 /* Don't send frame to the socket it came from */
2068 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2072 if (sock_queue_rcv_skb(sk
, nskb
))
2075 read_unlock(&l
->lock
);
2078 /* ---- L2CAP signalling commands ---- */
2079 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2080 u8 code
, u8 ident
, u16 dlen
, void *data
)
2082 struct sk_buff
*skb
, **frag
;
2083 struct l2cap_cmd_hdr
*cmd
;
2084 struct l2cap_hdr
*lh
;
2087 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2088 conn
, code
, ident
, dlen
);
2090 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2091 count
= min_t(unsigned int, conn
->mtu
, len
);
2093 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2097 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2098 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2099 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2101 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2104 cmd
->len
= cpu_to_le16(dlen
);
2107 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2108 memcpy(skb_put(skb
, count
), data
, count
);
2114 /* Continuation fragments (no L2CAP header) */
2115 frag
= &skb_shinfo(skb
)->frag_list
;
2117 count
= min_t(unsigned int, conn
->mtu
, len
);
2119 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2123 memcpy(skb_put(*frag
, count
), data
, count
);
2128 frag
= &(*frag
)->next
;
2138 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2140 struct l2cap_conf_opt
*opt
= *ptr
;
2143 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2151 *val
= *((u8
*) opt
->val
);
2155 *val
= get_unaligned_le16(opt
->val
);
2159 *val
= get_unaligned_le32(opt
->val
);
2163 *val
= (unsigned long) opt
->val
;
2167 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2171 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2173 struct l2cap_conf_opt
*opt
= *ptr
;
2175 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2182 *((u8
*) opt
->val
) = val
;
2186 put_unaligned_le16(val
, opt
->val
);
2190 put_unaligned_le32(val
, opt
->val
);
2194 memcpy(opt
->val
, (void *) val
, len
);
2198 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2201 static void l2cap_ack_timeout(unsigned long arg
)
2203 struct sock
*sk
= (void *) arg
;
2206 l2cap_send_ack(l2cap_pi(sk
));
2210 static inline void l2cap_ertm_init(struct sock
*sk
)
2212 l2cap_pi(sk
)->expected_ack_seq
= 0;
2213 l2cap_pi(sk
)->unacked_frames
= 0;
2214 l2cap_pi(sk
)->buffer_seq
= 0;
2215 l2cap_pi(sk
)->num_acked
= 0;
2216 l2cap_pi(sk
)->frames_sent
= 0;
2218 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2219 l2cap_retrans_timeout
, (unsigned long) sk
);
2220 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2221 l2cap_monitor_timeout
, (unsigned long) sk
);
2222 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2223 l2cap_ack_timeout
, (unsigned long) sk
);
2225 __skb_queue_head_init(SREJ_QUEUE(sk
));
2226 __skb_queue_head_init(BUSY_QUEUE(sk
));
2228 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2230 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
2233 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2236 case L2CAP_MODE_STREAMING
:
2237 case L2CAP_MODE_ERTM
:
2238 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2242 return L2CAP_MODE_BASIC
;
2246 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2248 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2249 struct l2cap_conf_req
*req
= data
;
2250 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2251 void *ptr
= req
->data
;
2253 BT_DBG("sk %p", sk
);
2255 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2259 case L2CAP_MODE_STREAMING
:
2260 case L2CAP_MODE_ERTM
:
2261 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
2266 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2271 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2272 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2275 case L2CAP_MODE_BASIC
:
2276 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2277 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2280 rfc
.mode
= L2CAP_MODE_BASIC
;
2282 rfc
.max_transmit
= 0;
2283 rfc
.retrans_timeout
= 0;
2284 rfc
.monitor_timeout
= 0;
2285 rfc
.max_pdu_size
= 0;
2287 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2288 (unsigned long) &rfc
);
2291 case L2CAP_MODE_ERTM
:
2292 rfc
.mode
= L2CAP_MODE_ERTM
;
2293 rfc
.txwin_size
= pi
->tx_win
;
2294 rfc
.max_transmit
= pi
->max_tx
;
2295 rfc
.retrans_timeout
= 0;
2296 rfc
.monitor_timeout
= 0;
2297 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2298 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2299 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2301 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2302 (unsigned long) &rfc
);
2304 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2307 if (pi
->fcs
== L2CAP_FCS_NONE
||
2308 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2309 pi
->fcs
= L2CAP_FCS_NONE
;
2310 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2314 case L2CAP_MODE_STREAMING
:
2315 rfc
.mode
= L2CAP_MODE_STREAMING
;
2317 rfc
.max_transmit
= 0;
2318 rfc
.retrans_timeout
= 0;
2319 rfc
.monitor_timeout
= 0;
2320 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2321 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2322 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2324 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2325 (unsigned long) &rfc
);
2327 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2330 if (pi
->fcs
== L2CAP_FCS_NONE
||
2331 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2332 pi
->fcs
= L2CAP_FCS_NONE
;
2333 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2338 /* FIXME: Need actual value of the flush timeout */
2339 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2340 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2342 req
->dcid
= cpu_to_le16(pi
->dcid
);
2343 req
->flags
= cpu_to_le16(0);
2348 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2350 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2351 struct l2cap_conf_rsp
*rsp
= data
;
2352 void *ptr
= rsp
->data
;
2353 void *req
= pi
->conf_req
;
2354 int len
= pi
->conf_len
;
2355 int type
, hint
, olen
;
2357 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2358 u16 mtu
= L2CAP_DEFAULT_MTU
;
2359 u16 result
= L2CAP_CONF_SUCCESS
;
2361 BT_DBG("sk %p", sk
);
2363 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2364 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2366 hint
= type
& L2CAP_CONF_HINT
;
2367 type
&= L2CAP_CONF_MASK
;
2370 case L2CAP_CONF_MTU
:
2374 case L2CAP_CONF_FLUSH_TO
:
2378 case L2CAP_CONF_QOS
:
2381 case L2CAP_CONF_RFC
:
2382 if (olen
== sizeof(rfc
))
2383 memcpy(&rfc
, (void *) val
, olen
);
2386 case L2CAP_CONF_FCS
:
2387 if (val
== L2CAP_FCS_NONE
)
2388 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2396 result
= L2CAP_CONF_UNKNOWN
;
2397 *((u8
*) ptr
++) = type
;
2402 if (pi
->num_conf_rsp
|| pi
->num_conf_req
> 1)
2406 case L2CAP_MODE_STREAMING
:
2407 case L2CAP_MODE_ERTM
:
2408 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2409 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2410 pi
->conn
->feat_mask
);
2414 if (pi
->mode
!= rfc
.mode
)
2415 return -ECONNREFUSED
;
2421 if (pi
->mode
!= rfc
.mode
) {
2422 result
= L2CAP_CONF_UNACCEPT
;
2423 rfc
.mode
= pi
->mode
;
2425 if (pi
->num_conf_rsp
== 1)
2426 return -ECONNREFUSED
;
2428 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2429 sizeof(rfc
), (unsigned long) &rfc
);
2433 if (result
== L2CAP_CONF_SUCCESS
) {
2434 /* Configure output options and let the other side know
2435 * which ones we don't like. */
2437 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2438 result
= L2CAP_CONF_UNACCEPT
;
2441 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2443 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2446 case L2CAP_MODE_BASIC
:
2447 pi
->fcs
= L2CAP_FCS_NONE
;
2448 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2451 case L2CAP_MODE_ERTM
:
2452 pi
->remote_tx_win
= rfc
.txwin_size
;
2453 pi
->remote_max_tx
= rfc
.max_transmit
;
2455 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2456 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2458 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2460 rfc
.retrans_timeout
=
2461 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2462 rfc
.monitor_timeout
=
2463 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2465 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2467 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2468 sizeof(rfc
), (unsigned long) &rfc
);
2472 case L2CAP_MODE_STREAMING
:
2473 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
2474 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2476 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2478 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2480 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2481 sizeof(rfc
), (unsigned long) &rfc
);
2486 result
= L2CAP_CONF_UNACCEPT
;
2488 memset(&rfc
, 0, sizeof(rfc
));
2489 rfc
.mode
= pi
->mode
;
2492 if (result
== L2CAP_CONF_SUCCESS
)
2493 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2495 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2496 rsp
->result
= cpu_to_le16(result
);
2497 rsp
->flags
= cpu_to_le16(0x0000);
2502 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2504 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2505 struct l2cap_conf_req
*req
= data
;
2506 void *ptr
= req
->data
;
2509 struct l2cap_conf_rfc rfc
;
2511 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2513 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2514 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2517 case L2CAP_CONF_MTU
:
2518 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2519 *result
= L2CAP_CONF_UNACCEPT
;
2520 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2523 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2526 case L2CAP_CONF_FLUSH_TO
:
2528 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2532 case L2CAP_CONF_RFC
:
2533 if (olen
== sizeof(rfc
))
2534 memcpy(&rfc
, (void *)val
, olen
);
2536 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2537 rfc
.mode
!= pi
->mode
)
2538 return -ECONNREFUSED
;
2542 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2543 sizeof(rfc
), (unsigned long) &rfc
);
2548 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2549 return -ECONNREFUSED
;
2551 pi
->mode
= rfc
.mode
;
2553 if (*result
== L2CAP_CONF_SUCCESS
) {
2555 case L2CAP_MODE_ERTM
:
2556 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2557 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2558 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2560 case L2CAP_MODE_STREAMING
:
2561 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2565 req
->dcid
= cpu_to_le16(pi
->dcid
);
2566 req
->flags
= cpu_to_le16(0x0000);
2571 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2573 struct l2cap_conf_rsp
*rsp
= data
;
2574 void *ptr
= rsp
->data
;
2576 BT_DBG("sk %p", sk
);
2578 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2579 rsp
->result
= cpu_to_le16(result
);
2580 rsp
->flags
= cpu_to_le16(flags
);
2585 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2587 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2590 struct l2cap_conf_rfc rfc
;
2592 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2594 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2597 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2598 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2601 case L2CAP_CONF_RFC
:
2602 if (olen
== sizeof(rfc
))
2603 memcpy(&rfc
, (void *)val
, olen
);
2610 case L2CAP_MODE_ERTM
:
2611 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2612 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2613 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2615 case L2CAP_MODE_STREAMING
:
2616 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2620 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2622 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2624 if (rej
->reason
!= 0x0000)
2627 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2628 cmd
->ident
== conn
->info_ident
) {
2629 del_timer(&conn
->info_timer
);
2631 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2632 conn
->info_ident
= 0;
2634 l2cap_conn_start(conn
);
2640 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2642 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2643 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2644 struct l2cap_conn_rsp rsp
;
2645 struct sock
*parent
, *sk
= NULL
;
2646 int result
, status
= L2CAP_CS_NO_INFO
;
2648 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2649 __le16 psm
= req
->psm
;
2651 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2653 /* Check if we have socket listening on psm */
2654 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2656 result
= L2CAP_CR_BAD_PSM
;
2660 bh_lock_sock(parent
);
2662 /* Check if the ACL is secure enough (if not SDP) */
2663 if (psm
!= cpu_to_le16(0x0001) &&
2664 !hci_conn_check_link_mode(conn
->hcon
)) {
2665 conn
->disc_reason
= 0x05;
2666 result
= L2CAP_CR_SEC_BLOCK
;
2670 result
= L2CAP_CR_NO_MEM
;
2672 /* Check for backlog size */
2673 if (sk_acceptq_is_full(parent
)) {
2674 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2678 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2682 write_lock_bh(&list
->lock
);
2684 /* Check if we already have channel with that dcid */
2685 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2686 write_unlock_bh(&list
->lock
);
2687 sock_set_flag(sk
, SOCK_ZAPPED
);
2688 l2cap_sock_kill(sk
);
2692 hci_conn_hold(conn
->hcon
);
2694 l2cap_sock_init(sk
, parent
);
2695 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2696 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2697 l2cap_pi(sk
)->psm
= psm
;
2698 l2cap_pi(sk
)->dcid
= scid
;
2700 __l2cap_chan_add(conn
, sk
, parent
);
2701 dcid
= l2cap_pi(sk
)->scid
;
2703 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2705 l2cap_pi(sk
)->ident
= cmd
->ident
;
2707 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2708 if (l2cap_check_security(sk
)) {
2709 if (bt_sk(sk
)->defer_setup
) {
2710 sk
->sk_state
= BT_CONNECT2
;
2711 result
= L2CAP_CR_PEND
;
2712 status
= L2CAP_CS_AUTHOR_PEND
;
2713 parent
->sk_data_ready(parent
, 0);
2715 sk
->sk_state
= BT_CONFIG
;
2716 result
= L2CAP_CR_SUCCESS
;
2717 status
= L2CAP_CS_NO_INFO
;
2720 sk
->sk_state
= BT_CONNECT2
;
2721 result
= L2CAP_CR_PEND
;
2722 status
= L2CAP_CS_AUTHEN_PEND
;
2725 sk
->sk_state
= BT_CONNECT2
;
2726 result
= L2CAP_CR_PEND
;
2727 status
= L2CAP_CS_NO_INFO
;
2730 write_unlock_bh(&list
->lock
);
2733 bh_unlock_sock(parent
);
2736 rsp
.scid
= cpu_to_le16(scid
);
2737 rsp
.dcid
= cpu_to_le16(dcid
);
2738 rsp
.result
= cpu_to_le16(result
);
2739 rsp
.status
= cpu_to_le16(status
);
2740 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2742 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2743 struct l2cap_info_req info
;
2744 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2746 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2747 conn
->info_ident
= l2cap_get_ident(conn
);
2749 mod_timer(&conn
->info_timer
, jiffies
+
2750 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2752 l2cap_send_cmd(conn
, conn
->info_ident
,
2753 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2756 if (sk
&& !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2757 result
== L2CAP_CR_SUCCESS
) {
2759 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2760 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2761 l2cap_build_conf_req(sk
, buf
), buf
);
2762 l2cap_pi(sk
)->num_conf_req
++;
2768 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2770 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2771 u16 scid
, dcid
, result
, status
;
2775 scid
= __le16_to_cpu(rsp
->scid
);
2776 dcid
= __le16_to_cpu(rsp
->dcid
);
2777 result
= __le16_to_cpu(rsp
->result
);
2778 status
= __le16_to_cpu(rsp
->status
);
2780 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2783 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2787 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2793 case L2CAP_CR_SUCCESS
:
2794 sk
->sk_state
= BT_CONFIG
;
2795 l2cap_pi(sk
)->ident
= 0;
2796 l2cap_pi(sk
)->dcid
= dcid
;
2797 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2799 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
2802 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2804 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2805 l2cap_build_conf_req(sk
, req
), req
);
2806 l2cap_pi(sk
)->num_conf_req
++;
2810 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2814 /* don't delete l2cap channel if sk is owned by user */
2815 if (sock_owned_by_user(sk
)) {
2816 sk
->sk_state
= BT_DISCONN
;
2817 l2cap_sock_clear_timer(sk
);
2818 l2cap_sock_set_timer(sk
, HZ
/ 5);
2822 l2cap_chan_del(sk
, ECONNREFUSED
);
2830 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
2832 /* FCS is enabled only in ERTM or streaming mode, if one or both
2835 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
2836 pi
->fcs
= L2CAP_FCS_NONE
;
2837 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2838 pi
->fcs
= L2CAP_FCS_CRC16
;
2841 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2843 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2849 dcid
= __le16_to_cpu(req
->dcid
);
2850 flags
= __le16_to_cpu(req
->flags
);
2852 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2854 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2858 if (sk
->sk_state
!= BT_CONFIG
) {
2859 struct l2cap_cmd_rej rej
;
2861 rej
.reason
= cpu_to_le16(0x0002);
2862 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2867 /* Reject if config buffer is too small. */
2868 len
= cmd_len
- sizeof(*req
);
2869 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2870 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2871 l2cap_build_conf_rsp(sk
, rsp
,
2872 L2CAP_CONF_REJECT
, flags
), rsp
);
2877 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2878 l2cap_pi(sk
)->conf_len
+= len
;
2880 if (flags
& 0x0001) {
2881 /* Incomplete config. Send empty response. */
2882 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2883 l2cap_build_conf_rsp(sk
, rsp
,
2884 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2888 /* Complete config. */
2889 len
= l2cap_parse_conf_req(sk
, rsp
);
2891 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2895 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2896 l2cap_pi(sk
)->num_conf_rsp
++;
2898 /* Reset config buffer. */
2899 l2cap_pi(sk
)->conf_len
= 0;
2901 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2904 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2905 set_default_fcs(l2cap_pi(sk
));
2907 sk
->sk_state
= BT_CONNECTED
;
2909 l2cap_pi(sk
)->next_tx_seq
= 0;
2910 l2cap_pi(sk
)->expected_tx_seq
= 0;
2911 __skb_queue_head_init(TX_QUEUE(sk
));
2912 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2913 l2cap_ertm_init(sk
);
2915 l2cap_chan_ready(sk
);
2919 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2921 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2922 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2923 l2cap_build_conf_req(sk
, buf
), buf
);
2924 l2cap_pi(sk
)->num_conf_req
++;
2932 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2934 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2935 u16 scid
, flags
, result
;
2937 int len
= cmd
->len
- sizeof(*rsp
);
2939 scid
= __le16_to_cpu(rsp
->scid
);
2940 flags
= __le16_to_cpu(rsp
->flags
);
2941 result
= __le16_to_cpu(rsp
->result
);
2943 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2944 scid
, flags
, result
);
2946 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2951 case L2CAP_CONF_SUCCESS
:
2952 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
2955 case L2CAP_CONF_UNACCEPT
:
2956 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2959 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2960 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2964 /* throw out any old stored conf requests */
2965 result
= L2CAP_CONF_SUCCESS
;
2966 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2969 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2973 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2974 L2CAP_CONF_REQ
, len
, req
);
2975 l2cap_pi(sk
)->num_conf_req
++;
2976 if (result
!= L2CAP_CONF_SUCCESS
)
2982 sk
->sk_err
= ECONNRESET
;
2983 l2cap_sock_set_timer(sk
, HZ
* 5);
2984 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2991 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2993 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2994 set_default_fcs(l2cap_pi(sk
));
2996 sk
->sk_state
= BT_CONNECTED
;
2997 l2cap_pi(sk
)->next_tx_seq
= 0;
2998 l2cap_pi(sk
)->expected_tx_seq
= 0;
2999 __skb_queue_head_init(TX_QUEUE(sk
));
3000 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3001 l2cap_ertm_init(sk
);
3003 l2cap_chan_ready(sk
);
3011 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3013 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3014 struct l2cap_disconn_rsp rsp
;
3018 scid
= __le16_to_cpu(req
->scid
);
3019 dcid
= __le16_to_cpu(req
->dcid
);
3021 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3023 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3027 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3028 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3029 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3031 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3033 /* don't delete l2cap channel if sk is owned by user */
3034 if (sock_owned_by_user(sk
)) {
3035 sk
->sk_state
= BT_DISCONN
;
3036 l2cap_sock_clear_timer(sk
);
3037 l2cap_sock_set_timer(sk
, HZ
/ 5);
3042 l2cap_chan_del(sk
, ECONNRESET
);
3045 l2cap_sock_kill(sk
);
3049 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3051 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3055 scid
= __le16_to_cpu(rsp
->scid
);
3056 dcid
= __le16_to_cpu(rsp
->dcid
);
3058 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3060 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3064 /* don't delete l2cap channel if sk is owned by user */
3065 if (sock_owned_by_user(sk
)) {
3066 sk
->sk_state
= BT_DISCONN
;
3067 l2cap_sock_clear_timer(sk
);
3068 l2cap_sock_set_timer(sk
, HZ
/ 5);
3073 l2cap_chan_del(sk
, 0);
3076 l2cap_sock_kill(sk
);
3080 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3082 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3085 type
= __le16_to_cpu(req
->type
);
3087 BT_DBG("type 0x%4.4x", type
);
3089 if (type
== L2CAP_IT_FEAT_MASK
) {
3091 u32 feat_mask
= l2cap_feat_mask
;
3092 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3093 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3094 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3096 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3098 put_unaligned_le32(feat_mask
, rsp
->data
);
3099 l2cap_send_cmd(conn
, cmd
->ident
,
3100 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3101 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3103 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3104 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3105 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3106 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3107 l2cap_send_cmd(conn
, cmd
->ident
,
3108 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3110 struct l2cap_info_rsp rsp
;
3111 rsp
.type
= cpu_to_le16(type
);
3112 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3113 l2cap_send_cmd(conn
, cmd
->ident
,
3114 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3120 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3122 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3125 type
= __le16_to_cpu(rsp
->type
);
3126 result
= __le16_to_cpu(rsp
->result
);
3128 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3130 del_timer(&conn
->info_timer
);
3132 if (result
!= L2CAP_IR_SUCCESS
) {
3133 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3134 conn
->info_ident
= 0;
3136 l2cap_conn_start(conn
);
3141 if (type
== L2CAP_IT_FEAT_MASK
) {
3142 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3144 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3145 struct l2cap_info_req req
;
3146 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3148 conn
->info_ident
= l2cap_get_ident(conn
);
3150 l2cap_send_cmd(conn
, conn
->info_ident
,
3151 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3153 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3154 conn
->info_ident
= 0;
3156 l2cap_conn_start(conn
);
3158 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3159 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3160 conn
->info_ident
= 0;
3162 l2cap_conn_start(conn
);
3168 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3170 u8
*data
= skb
->data
;
3172 struct l2cap_cmd_hdr cmd
;
3175 l2cap_raw_recv(conn
, skb
);
3177 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3179 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3180 data
+= L2CAP_CMD_HDR_SIZE
;
3181 len
-= L2CAP_CMD_HDR_SIZE
;
3183 cmd_len
= le16_to_cpu(cmd
.len
);
3185 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3187 if (cmd_len
> len
|| !cmd
.ident
) {
3188 BT_DBG("corrupted command");
3193 case L2CAP_COMMAND_REJ
:
3194 l2cap_command_rej(conn
, &cmd
, data
);
3197 case L2CAP_CONN_REQ
:
3198 err
= l2cap_connect_req(conn
, &cmd
, data
);
3201 case L2CAP_CONN_RSP
:
3202 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3205 case L2CAP_CONF_REQ
:
3206 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3209 case L2CAP_CONF_RSP
:
3210 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3213 case L2CAP_DISCONN_REQ
:
3214 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3217 case L2CAP_DISCONN_RSP
:
3218 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3221 case L2CAP_ECHO_REQ
:
3222 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3225 case L2CAP_ECHO_RSP
:
3228 case L2CAP_INFO_REQ
:
3229 err
= l2cap_information_req(conn
, &cmd
, data
);
3232 case L2CAP_INFO_RSP
:
3233 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3237 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3243 struct l2cap_cmd_rej rej
;
3244 BT_DBG("error %d", err
);
3246 /* FIXME: Map err to a valid reason */
3247 rej
.reason
= cpu_to_le16(0);
3248 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3258 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3260 u16 our_fcs
, rcv_fcs
;
3261 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3263 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3264 skb_trim(skb
, skb
->len
- 2);
3265 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3266 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3268 if (our_fcs
!= rcv_fcs
)
3274 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3276 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3279 pi
->frames_sent
= 0;
3281 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3283 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3284 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3285 l2cap_send_sframe(pi
, control
);
3286 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3289 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3290 l2cap_retransmit_frames(sk
);
3292 l2cap_ertm_send(sk
);
3294 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3295 pi
->frames_sent
== 0) {
3296 control
|= L2CAP_SUPER_RCV_READY
;
3297 l2cap_send_sframe(pi
, control
);
3301 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3303 struct sk_buff
*next_skb
;
3304 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3305 int tx_seq_offset
, next_tx_seq_offset
;
3307 bt_cb(skb
)->tx_seq
= tx_seq
;
3308 bt_cb(skb
)->sar
= sar
;
3310 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3312 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3316 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3317 if (tx_seq_offset
< 0)
3318 tx_seq_offset
+= 64;
3321 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3324 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3325 pi
->buffer_seq
) % 64;
3326 if (next_tx_seq_offset
< 0)
3327 next_tx_seq_offset
+= 64;
3329 if (next_tx_seq_offset
> tx_seq_offset
) {
3330 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3334 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3337 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3339 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3344 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3346 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3347 struct sk_buff
*_skb
;
3350 switch (control
& L2CAP_CTRL_SAR
) {
3351 case L2CAP_SDU_UNSEGMENTED
:
3352 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3355 err
= sock_queue_rcv_skb(sk
, skb
);
3361 case L2CAP_SDU_START
:
3362 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3365 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3367 if (pi
->sdu_len
> pi
->imtu
)
3370 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3374 /* pull sdu_len bytes only after alloc, because of Local Busy
3375 * condition we have to be sure that this will be executed
3376 * only once, i.e., when alloc does not fail */
3379 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3381 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3382 pi
->partial_sdu_len
= skb
->len
;
3385 case L2CAP_SDU_CONTINUE
:
3386 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3392 pi
->partial_sdu_len
+= skb
->len
;
3393 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3396 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3401 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3407 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3408 pi
->partial_sdu_len
+= skb
->len
;
3410 if (pi
->partial_sdu_len
> pi
->imtu
)
3413 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3416 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3419 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3421 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3425 err
= sock_queue_rcv_skb(sk
, _skb
);
3428 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3432 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3433 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3447 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3452 static int l2cap_try_push_rx_skb(struct sock
*sk
)
3454 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3455 struct sk_buff
*skb
;
3459 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3460 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3461 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3463 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3467 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3470 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3473 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3474 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3475 l2cap_send_sframe(pi
, control
);
3476 l2cap_pi(sk
)->retry_count
= 1;
3478 del_timer(&pi
->retrans_timer
);
3479 __mod_monitor_timer();
3481 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3484 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3485 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3487 BT_DBG("sk %p, Exit local busy", sk
);
3492 static void l2cap_busy_work(struct work_struct
*work
)
3494 DECLARE_WAITQUEUE(wait
, current
);
3495 struct l2cap_pinfo
*pi
=
3496 container_of(work
, struct l2cap_pinfo
, busy_work
);
3497 struct sock
*sk
= (struct sock
*)pi
;
3498 int n_tries
= 0, timeo
= HZ
/5, err
;
3499 struct sk_buff
*skb
;
3503 add_wait_queue(sk_sleep(sk
), &wait
);
3504 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3505 set_current_state(TASK_INTERRUPTIBLE
);
3507 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3509 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3516 if (signal_pending(current
)) {
3517 err
= sock_intr_errno(timeo
);
3522 timeo
= schedule_timeout(timeo
);
3525 err
= sock_error(sk
);
3529 if (l2cap_try_push_rx_skb(sk
) == 0)
3533 set_current_state(TASK_RUNNING
);
3534 remove_wait_queue(sk_sleep(sk
), &wait
);
3539 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3541 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3544 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3545 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3546 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3547 return l2cap_try_push_rx_skb(sk
);
3552 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3554 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3558 /* Busy Condition */
3559 BT_DBG("sk %p, Enter local busy", sk
);
3561 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3562 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3563 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3565 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3566 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3567 l2cap_send_sframe(pi
, sctrl
);
3569 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3571 del_timer(&pi
->ack_timer
);
3573 queue_work(_busy_wq
, &pi
->busy_work
);
3578 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3580 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3581 struct sk_buff
*_skb
;
3585 * TODO: We have to notify the userland if some data is lost with the
3589 switch (control
& L2CAP_CTRL_SAR
) {
3590 case L2CAP_SDU_UNSEGMENTED
:
3591 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3596 err
= sock_queue_rcv_skb(sk
, skb
);
3602 case L2CAP_SDU_START
:
3603 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3608 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3611 if (pi
->sdu_len
> pi
->imtu
) {
3616 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3622 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3624 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3625 pi
->partial_sdu_len
= skb
->len
;
3629 case L2CAP_SDU_CONTINUE
:
3630 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3633 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3635 pi
->partial_sdu_len
+= skb
->len
;
3636 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3644 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3647 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3649 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3650 pi
->partial_sdu_len
+= skb
->len
;
3652 if (pi
->partial_sdu_len
> pi
->imtu
)
3655 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3656 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3657 err
= sock_queue_rcv_skb(sk
, _skb
);
3672 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3674 struct sk_buff
*skb
;
3677 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3678 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3681 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3682 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3683 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3684 l2cap_pi(sk
)->buffer_seq_srej
=
3685 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3686 tx_seq
= (tx_seq
+ 1) % 64;
3690 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3692 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3693 struct srej_list
*l
, *tmp
;
3696 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3697 if (l
->tx_seq
== tx_seq
) {
3702 control
= L2CAP_SUPER_SELECT_REJECT
;
3703 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3704 l2cap_send_sframe(pi
, control
);
3706 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3710 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3712 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3713 struct srej_list
*new;
3716 while (tx_seq
!= pi
->expected_tx_seq
) {
3717 control
= L2CAP_SUPER_SELECT_REJECT
;
3718 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3719 l2cap_send_sframe(pi
, control
);
3721 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3722 new->tx_seq
= pi
->expected_tx_seq
;
3723 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3724 list_add_tail(&new->list
, SREJ_LIST(sk
));
3726 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3729 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3731 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3732 u8 tx_seq
= __get_txseq(rx_control
);
3733 u8 req_seq
= __get_reqseq(rx_control
);
3734 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3735 int tx_seq_offset
, expected_tx_seq_offset
;
3736 int num_to_ack
= (pi
->tx_win
/6) + 1;
3739 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3742 if (L2CAP_CTRL_FINAL
& rx_control
&&
3743 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3744 del_timer(&pi
->monitor_timer
);
3745 if (pi
->unacked_frames
> 0)
3746 __mod_retrans_timer();
3747 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3750 pi
->expected_ack_seq
= req_seq
;
3751 l2cap_drop_acked_frames(sk
);
3753 if (tx_seq
== pi
->expected_tx_seq
)
3756 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3757 if (tx_seq_offset
< 0)
3758 tx_seq_offset
+= 64;
3760 /* invalid tx_seq */
3761 if (tx_seq_offset
>= pi
->tx_win
) {
3762 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3766 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3769 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3770 struct srej_list
*first
;
3772 first
= list_first_entry(SREJ_LIST(sk
),
3773 struct srej_list
, list
);
3774 if (tx_seq
== first
->tx_seq
) {
3775 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3776 l2cap_check_srej_gap(sk
, tx_seq
);
3778 list_del(&first
->list
);
3781 if (list_empty(SREJ_LIST(sk
))) {
3782 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3783 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3785 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3788 struct srej_list
*l
;
3790 /* duplicated tx_seq */
3791 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3794 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3795 if (l
->tx_seq
== tx_seq
) {
3796 l2cap_resend_srejframe(sk
, tx_seq
);
3800 l2cap_send_srejframe(sk
, tx_seq
);
3803 expected_tx_seq_offset
=
3804 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3805 if (expected_tx_seq_offset
< 0)
3806 expected_tx_seq_offset
+= 64;
3808 /* duplicated tx_seq */
3809 if (tx_seq_offset
< expected_tx_seq_offset
)
3812 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3814 BT_DBG("sk %p, Enter SREJ", sk
);
3816 INIT_LIST_HEAD(SREJ_LIST(sk
));
3817 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3819 __skb_queue_head_init(SREJ_QUEUE(sk
));
3820 __skb_queue_head_init(BUSY_QUEUE(sk
));
3821 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3823 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3825 l2cap_send_srejframe(sk
, tx_seq
);
3827 del_timer(&pi
->ack_timer
);
3832 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3834 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3835 bt_cb(skb
)->tx_seq
= tx_seq
;
3836 bt_cb(skb
)->sar
= sar
;
3837 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3841 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3845 if (rx_control
& L2CAP_CTRL_FINAL
) {
3846 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3847 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3849 l2cap_retransmit_frames(sk
);
3854 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3855 if (pi
->num_acked
== num_to_ack
- 1)
3865 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
3867 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3869 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
3872 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3873 l2cap_drop_acked_frames(sk
);
3875 if (rx_control
& L2CAP_CTRL_POLL
) {
3876 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3877 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3878 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3879 (pi
->unacked_frames
> 0))
3880 __mod_retrans_timer();
3882 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3883 l2cap_send_srejtail(sk
);
3885 l2cap_send_i_or_rr_or_rnr(sk
);
3888 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3889 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3891 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3892 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3894 l2cap_retransmit_frames(sk
);
3897 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3898 (pi
->unacked_frames
> 0))
3899 __mod_retrans_timer();
3901 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3902 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3905 l2cap_ertm_send(sk
);
3909 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
3911 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3912 u8 tx_seq
= __get_reqseq(rx_control
);
3914 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3916 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3918 pi
->expected_ack_seq
= tx_seq
;
3919 l2cap_drop_acked_frames(sk
);
3921 if (rx_control
& L2CAP_CTRL_FINAL
) {
3922 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3923 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3925 l2cap_retransmit_frames(sk
);
3927 l2cap_retransmit_frames(sk
);
3929 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
3930 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3933 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
3935 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3936 u8 tx_seq
= __get_reqseq(rx_control
);
3938 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3940 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3942 if (rx_control
& L2CAP_CTRL_POLL
) {
3943 pi
->expected_ack_seq
= tx_seq
;
3944 l2cap_drop_acked_frames(sk
);
3946 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3947 l2cap_retransmit_one_frame(sk
, tx_seq
);
3949 l2cap_ertm_send(sk
);
3951 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3952 pi
->srej_save_reqseq
= tx_seq
;
3953 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3955 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3956 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3957 pi
->srej_save_reqseq
== tx_seq
)
3958 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3960 l2cap_retransmit_one_frame(sk
, tx_seq
);
3962 l2cap_retransmit_one_frame(sk
, tx_seq
);
3963 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3964 pi
->srej_save_reqseq
= tx_seq
;
3965 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3970 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
3972 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3973 u8 tx_seq
= __get_reqseq(rx_control
);
3975 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3977 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3978 pi
->expected_ack_seq
= tx_seq
;
3979 l2cap_drop_acked_frames(sk
);
3981 if (rx_control
& L2CAP_CTRL_POLL
)
3982 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3984 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
3985 del_timer(&pi
->retrans_timer
);
3986 if (rx_control
& L2CAP_CTRL_POLL
)
3987 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
3991 if (rx_control
& L2CAP_CTRL_POLL
)
3992 l2cap_send_srejtail(sk
);
3994 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
3997 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3999 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4001 if (L2CAP_CTRL_FINAL
& rx_control
&&
4002 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4003 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4004 if (l2cap_pi(sk
)->unacked_frames
> 0)
4005 __mod_retrans_timer();
4006 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4009 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4010 case L2CAP_SUPER_RCV_READY
:
4011 l2cap_data_channel_rrframe(sk
, rx_control
);
4014 case L2CAP_SUPER_REJECT
:
4015 l2cap_data_channel_rejframe(sk
, rx_control
);
4018 case L2CAP_SUPER_SELECT_REJECT
:
4019 l2cap_data_channel_srejframe(sk
, rx_control
);
4022 case L2CAP_SUPER_RCV_NOT_READY
:
4023 l2cap_data_channel_rnrframe(sk
, rx_control
);
4031 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4033 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4036 int len
, next_tx_seq_offset
, req_seq_offset
;
4038 control
= get_unaligned_le16(skb
->data
);
4043 * We can just drop the corrupted I-frame here.
4044 * Receiver will miss it and start proper recovery
4045 * procedures and ask retransmission.
4047 if (l2cap_check_fcs(pi
, skb
))
4050 if (__is_sar_start(control
) && __is_iframe(control
))
4053 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4056 if (len
> pi
->mps
) {
4057 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4061 req_seq
= __get_reqseq(control
);
4062 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4063 if (req_seq_offset
< 0)
4064 req_seq_offset
+= 64;
4066 next_tx_seq_offset
=
4067 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4068 if (next_tx_seq_offset
< 0)
4069 next_tx_seq_offset
+= 64;
4071 /* check for invalid req-seq */
4072 if (req_seq_offset
> next_tx_seq_offset
) {
4073 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4077 if (__is_iframe(control
)) {
4079 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4083 l2cap_data_channel_iframe(sk
, control
, skb
);
4087 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4091 l2cap_data_channel_sframe(sk
, control
, skb
);
4101 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4104 struct l2cap_pinfo
*pi
;
4109 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4111 BT_DBG("unknown cid 0x%4.4x", cid
);
4117 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4119 if (sk
->sk_state
!= BT_CONNECTED
)
4123 case L2CAP_MODE_BASIC
:
4124 /* If socket recv buffers overflows we drop data here
4125 * which is *bad* because L2CAP has to be reliable.
4126 * But we don't have any other choice. L2CAP doesn't
4127 * provide flow control mechanism. */
4129 if (pi
->imtu
< skb
->len
)
4132 if (!sock_queue_rcv_skb(sk
, skb
))
4136 case L2CAP_MODE_ERTM
:
4137 if (!sock_owned_by_user(sk
)) {
4138 l2cap_ertm_data_rcv(sk
, skb
);
4140 if (sk_add_backlog(sk
, skb
))
4146 case L2CAP_MODE_STREAMING
:
4147 control
= get_unaligned_le16(skb
->data
);
4151 if (l2cap_check_fcs(pi
, skb
))
4154 if (__is_sar_start(control
))
4157 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4160 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4163 tx_seq
= __get_txseq(control
);
4165 if (pi
->expected_tx_seq
== tx_seq
)
4166 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4168 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4170 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4175 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4189 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4193 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4199 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4201 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4204 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4207 if (!sock_queue_rcv_skb(sk
, skb
))
4219 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4221 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4225 skb_pull(skb
, L2CAP_HDR_SIZE
);
4226 cid
= __le16_to_cpu(lh
->cid
);
4227 len
= __le16_to_cpu(lh
->len
);
4229 if (len
!= skb
->len
) {
4234 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4237 case L2CAP_CID_SIGNALING
:
4238 l2cap_sig_channel(conn
, skb
);
4241 case L2CAP_CID_CONN_LESS
:
4242 psm
= get_unaligned_le16(skb
->data
);
4244 l2cap_conless_channel(conn
, psm
, skb
);
4248 l2cap_data_channel(conn
, cid
, skb
);
4253 /* ---- L2CAP interface with lower layer (HCI) ---- */
4255 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4257 int exact
= 0, lm1
= 0, lm2
= 0;
4258 register struct sock
*sk
;
4259 struct hlist_node
*node
;
4261 if (type
!= ACL_LINK
)
4264 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4266 /* Find listening sockets and check their link_mode */
4267 read_lock(&l2cap_sk_list
.lock
);
4268 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4269 if (sk
->sk_state
!= BT_LISTEN
)
4272 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4273 lm1
|= HCI_LM_ACCEPT
;
4274 if (l2cap_pi(sk
)->role_switch
)
4275 lm1
|= HCI_LM_MASTER
;
4277 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4278 lm2
|= HCI_LM_ACCEPT
;
4279 if (l2cap_pi(sk
)->role_switch
)
4280 lm2
|= HCI_LM_MASTER
;
4283 read_unlock(&l2cap_sk_list
.lock
);
4285 return exact
? lm1
: lm2
;
4288 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4290 struct l2cap_conn
*conn
;
4292 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4294 if (hcon
->type
!= ACL_LINK
)
4298 conn
= l2cap_conn_add(hcon
, status
);
4300 l2cap_conn_ready(conn
);
4302 l2cap_conn_del(hcon
, bt_err(status
));
4307 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4309 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4311 BT_DBG("hcon %p", hcon
);
4313 if (hcon
->type
!= ACL_LINK
|| !conn
)
4316 return conn
->disc_reason
;
4319 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4321 BT_DBG("hcon %p reason %d", hcon
, reason
);
4323 if (hcon
->type
!= ACL_LINK
)
4326 l2cap_conn_del(hcon
, bt_err(reason
));
4331 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4333 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4336 if (encrypt
== 0x00) {
4337 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4338 l2cap_sock_clear_timer(sk
);
4339 l2cap_sock_set_timer(sk
, HZ
* 5);
4340 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4341 __l2cap_sock_close(sk
, ECONNREFUSED
);
4343 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4344 l2cap_sock_clear_timer(sk
);
4348 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4350 struct l2cap_chan_list
*l
;
4351 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4357 l
= &conn
->chan_list
;
4359 BT_DBG("conn %p", conn
);
4361 read_lock(&l
->lock
);
4363 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4366 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4371 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4372 sk
->sk_state
== BT_CONFIG
)) {
4373 l2cap_check_encryption(sk
, encrypt
);
4378 if (sk
->sk_state
== BT_CONNECT
) {
4380 struct l2cap_conn_req req
;
4381 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4382 req
.psm
= l2cap_pi(sk
)->psm
;
4384 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4385 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4387 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4388 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4390 l2cap_sock_clear_timer(sk
);
4391 l2cap_sock_set_timer(sk
, HZ
/ 10);
4393 } else if (sk
->sk_state
== BT_CONNECT2
) {
4394 struct l2cap_conn_rsp rsp
;
4398 sk
->sk_state
= BT_CONFIG
;
4399 result
= L2CAP_CR_SUCCESS
;
4401 sk
->sk_state
= BT_DISCONN
;
4402 l2cap_sock_set_timer(sk
, HZ
/ 10);
4403 result
= L2CAP_CR_SEC_BLOCK
;
4406 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4407 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4408 rsp
.result
= cpu_to_le16(result
);
4409 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4410 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4411 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4417 read_unlock(&l
->lock
);
4422 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4424 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4427 conn
= l2cap_conn_add(hcon
, 0);
4432 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4434 if (!(flags
& ACL_CONT
)) {
4435 struct l2cap_hdr
*hdr
;
4441 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4442 kfree_skb(conn
->rx_skb
);
4443 conn
->rx_skb
= NULL
;
4445 l2cap_conn_unreliable(conn
, ECOMM
);
4448 /* Start fragment always begin with Basic L2CAP header */
4449 if (skb
->len
< L2CAP_HDR_SIZE
) {
4450 BT_ERR("Frame is too short (len %d)", skb
->len
);
4451 l2cap_conn_unreliable(conn
, ECOMM
);
4455 hdr
= (struct l2cap_hdr
*) skb
->data
;
4456 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4457 cid
= __le16_to_cpu(hdr
->cid
);
4459 if (len
== skb
->len
) {
4460 /* Complete frame received */
4461 l2cap_recv_frame(conn
, skb
);
4465 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4467 if (skb
->len
> len
) {
4468 BT_ERR("Frame is too long (len %d, expected len %d)",
4470 l2cap_conn_unreliable(conn
, ECOMM
);
4474 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4476 if (sk
&& l2cap_pi(sk
)->imtu
< len
- L2CAP_HDR_SIZE
) {
4477 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4478 len
, l2cap_pi(sk
)->imtu
);
4480 l2cap_conn_unreliable(conn
, ECOMM
);
4487 /* Allocate skb for the complete frame (with header) */
4488 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4492 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4494 conn
->rx_len
= len
- skb
->len
;
4496 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4498 if (!conn
->rx_len
) {
4499 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4500 l2cap_conn_unreliable(conn
, ECOMM
);
4504 if (skb
->len
> conn
->rx_len
) {
4505 BT_ERR("Fragment is too long (len %d, expected %d)",
4506 skb
->len
, conn
->rx_len
);
4507 kfree_skb(conn
->rx_skb
);
4508 conn
->rx_skb
= NULL
;
4510 l2cap_conn_unreliable(conn
, ECOMM
);
4514 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4516 conn
->rx_len
-= skb
->len
;
4518 if (!conn
->rx_len
) {
4519 /* Complete frame received */
4520 l2cap_recv_frame(conn
, conn
->rx_skb
);
4521 conn
->rx_skb
= NULL
;
/* seq_file show callback for the "l2cap" debugfs entry: prints one line
 * per L2CAP socket in the global socket list.
 * Columns: src dst state psm scid dcid imtu omtu sec_level.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	/* _bh lock: the socket list is also modified from softirq context */
	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
/* debugfs open callback: binds reads of the file to l2cap_debugfs_show()
 * via the single-record seq_file helper. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
/* File operations for the debugfs "l2cap" file; read/llseek/release are
 * the standard seq_file single-open helpers. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* dentry of the debugfs "l2cap" file created in l2cap_init();
 * NULL if creation failed or debugfs is unavailable. */
static struct dentry *l2cap_debugfs;
/* Protocol descriptor registered with the HCI core: entry points the HCI
 * layer calls for incoming connection requests, connect/disconnect
 * confirmations, security (auth/encrypt) events and ACL data destined
 * for L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4578 static int __init
l2cap_init(void)
4582 err
= l2cap_init_sockets();
4586 _busy_wq
= create_singlethread_workqueue("l2cap");
4592 err
= hci_register_proto(&l2cap_hci_proto
);
4594 BT_ERR("L2CAP protocol registration failed");
4595 bt_sock_unregister(BTPROTO_L2CAP
);
4600 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4601 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4603 BT_ERR("Failed to create L2CAP debug file");
4606 BT_INFO("L2CAP ver %s", VERSION
);
4607 BT_INFO("L2CAP socket layer initialized");
4612 destroy_workqueue(_busy_wq
);
4613 l2cap_cleanup_sockets();
/* Module teardown: undo l2cap_init() in reverse order.  The workqueue
 * is flushed before destruction so no queued busy-state work is still
 * pending when it goes away. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
/* Exported no-op: gives dependent modules (RFCOMM, BNEP, ...) a symbol
 * to reference so that modprobe pulls in this module automatically. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime knob (0644 = root-writable via sysfs): setting it to 1 turns
 * off Enhanced Retransmission Mode. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Lets the Bluetooth core auto-load this module by protocol number 0
 * (BTPROTO_L2CAP). */
MODULE_ALIAS("bt-proto-0");