2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm
= 0;
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops
;
65 static struct workqueue_struct
*_busy_wq
;
67 static struct bt_sock_list l2cap_sk_list
= {
68 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
71 static void l2cap_busy_work(struct work_struct
*work
);
73 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
74 static void l2cap_sock_close(struct sock
*sk
);
75 static void l2cap_sock_kill(struct sock
*sk
);
77 static int l2cap_build_conf_req(struct sock
*sk
, void *data
);
78 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
79 u8 code
, u8 ident
, u16 dlen
, void *data
);
81 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
83 /* ---- L2CAP timers ---- */
/* Socket timer callback: choose an error code based on the current
 * connection state and close the socket.  Runs in timer (softirq)
 * context, hence the bh-variant socket locking. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;	/* NOTE(review): else-branch restored; dropped by extraction -- verify */

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Drop the timer's reference; may free the socket if it is orphaned.
	 * NOTE(review): kill/put restored from upstream -- verify. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
109 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
111 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
112 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
115 static void l2cap_sock_clear_timer(struct sock
*sk
)
117 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
118 sk_stop_timer(sk
, &sk
->sk_timer
);
121 /* ---- L2CAP channels ---- */
122 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
125 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
126 if (l2cap_pi(s
)->dcid
== cid
)
132 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
135 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
136 if (l2cap_pi(s
)->scid
== cid
)
142 /* Find channel with given SCID.
143 * Returns locked socket */
144 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
148 s
= __l2cap_get_chan_by_scid(l
, cid
);
151 read_unlock(&l
->lock
);
155 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
158 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
159 if (l2cap_pi(s
)->ident
== ident
)
165 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
169 s
= __l2cap_get_chan_by_ident(l
, ident
);
172 read_unlock(&l
->lock
);
176 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
178 u16 cid
= L2CAP_CID_DYN_START
;
180 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
181 if (!__l2cap_get_chan_by_scid(l
, cid
))
188 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
193 l2cap_pi(l
->head
)->prev_c
= sk
;
195 l2cap_pi(sk
)->next_c
= l
->head
;
196 l2cap_pi(sk
)->prev_c
= NULL
;
200 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
202 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
204 write_lock_bh(&l
->lock
);
209 l2cap_pi(next
)->prev_c
= prev
;
211 l2cap_pi(prev
)->next_c
= next
;
212 write_unlock_bh(&l
->lock
);
217 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
219 struct l2cap_chan_list
*l
= &conn
->chan_list
;
221 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
222 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
224 conn
->disc_reason
= 0x13;
226 l2cap_pi(sk
)->conn
= conn
;
228 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
229 /* Alloc CID for connection-oriented socket */
230 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
231 } else if (sk
->sk_type
== SOCK_DGRAM
) {
232 /* Connectionless socket */
233 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
234 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
235 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
237 /* Raw socket can send/recv signalling messages only */
238 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
239 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
240 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
243 __l2cap_chan_link(l
, sk
);
246 bt_accept_enqueue(parent
, sk
);
250 * Must be called on the locked socket. */
251 static void l2cap_chan_del(struct sock
*sk
, int err
)
253 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
254 struct sock
*parent
= bt_sk(sk
)->parent
;
256 l2cap_sock_clear_timer(sk
);
258 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
261 /* Unlink from channel list */
262 l2cap_chan_unlink(&conn
->chan_list
, sk
);
263 l2cap_pi(sk
)->conn
= NULL
;
264 hci_conn_put(conn
->hcon
);
267 sk
->sk_state
= BT_CLOSED
;
268 sock_set_flag(sk
, SOCK_ZAPPED
);
274 bt_accept_unlink(sk
);
275 parent
->sk_data_ready(parent
, 0);
277 sk
->sk_state_change(sk
);
279 skb_queue_purge(TX_QUEUE(sk
));
281 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
282 struct srej_list
*l
, *tmp
;
284 del_timer(&l2cap_pi(sk
)->retrans_timer
);
285 del_timer(&l2cap_pi(sk
)->monitor_timer
);
286 del_timer(&l2cap_pi(sk
)->ack_timer
);
288 skb_queue_purge(SREJ_QUEUE(sk
));
289 skb_queue_purge(BUSY_QUEUE(sk
));
291 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
298 /* Service level security */
299 static inline int l2cap_check_security(struct sock
*sk
)
301 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
304 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
305 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
306 auth_type
= HCI_AT_NO_BONDING_MITM
;
308 auth_type
= HCI_AT_NO_BONDING
;
310 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
311 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
313 switch (l2cap_pi(sk
)->sec_level
) {
314 case BT_SECURITY_HIGH
:
315 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
317 case BT_SECURITY_MEDIUM
:
318 auth_type
= HCI_AT_GENERAL_BONDING
;
321 auth_type
= HCI_AT_NO_BONDING
;
326 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
330 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
334 /* Get next available identificator.
335 * 1 - 128 are used by kernel.
336 * 129 - 199 are reserved.
337 * 200 - 254 are used by utilities like l2ping, etc.
340 spin_lock_bh(&conn
->lock
);
342 if (++conn
->tx_ident
> 128)
347 spin_unlock_bh(&conn
->lock
);
352 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
354 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
356 BT_DBG("code 0x%2.2x", code
);
361 hci_send_acl(conn
->hcon
, skb
, 0);
364 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
367 struct l2cap_hdr
*lh
;
368 struct l2cap_conn
*conn
= pi
->conn
;
369 struct sock
*sk
= (struct sock
*)pi
;
370 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
372 if (sk
->sk_state
!= BT_CONNECTED
)
375 if (pi
->fcs
== L2CAP_FCS_CRC16
)
378 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
380 count
= min_t(unsigned int, conn
->mtu
, hlen
);
381 control
|= L2CAP_CTRL_FRAME_TYPE
;
383 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
384 control
|= L2CAP_CTRL_FINAL
;
385 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
388 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
389 control
|= L2CAP_CTRL_POLL
;
390 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
393 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
397 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
398 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
399 lh
->cid
= cpu_to_le16(pi
->dcid
);
400 put_unaligned_le16(control
, skb_put(skb
, 2));
402 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
403 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
404 put_unaligned_le16(fcs
, skb_put(skb
, 2));
407 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
410 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
412 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
413 control
|= L2CAP_SUPER_RCV_NOT_READY
;
414 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
416 control
|= L2CAP_SUPER_RCV_READY
;
418 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
420 l2cap_send_sframe(pi
, control
);
423 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
425 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
428 static void l2cap_do_start(struct sock
*sk
)
430 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
432 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
433 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
436 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
437 struct l2cap_conn_req req
;
438 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
439 req
.psm
= l2cap_pi(sk
)->psm
;
441 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
442 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
444 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
445 L2CAP_CONN_REQ
, sizeof(req
), &req
);
448 struct l2cap_info_req req
;
449 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
451 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
452 conn
->info_ident
= l2cap_get_ident(conn
);
454 mod_timer(&conn
->info_timer
, jiffies
+
455 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
457 l2cap_send_cmd(conn
, conn
->info_ident
,
458 L2CAP_INFO_REQ
, sizeof(req
), &req
);
462 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
464 u32 local_feat_mask
= l2cap_feat_mask
;
466 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
469 case L2CAP_MODE_ERTM
:
470 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
471 case L2CAP_MODE_STREAMING
:
472 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
478 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
480 struct l2cap_disconn_req req
;
485 skb_queue_purge(TX_QUEUE(sk
));
487 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
488 del_timer(&l2cap_pi(sk
)->retrans_timer
);
489 del_timer(&l2cap_pi(sk
)->monitor_timer
);
490 del_timer(&l2cap_pi(sk
)->ack_timer
);
493 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
494 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
495 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
496 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
498 sk
->sk_state
= BT_DISCONN
;
502 /* ---- L2CAP connections ---- */
503 static void l2cap_conn_start(struct l2cap_conn
*conn
)
505 struct l2cap_chan_list
*l
= &conn
->chan_list
;
506 struct sock_del_list del
, *tmp1
, *tmp2
;
509 BT_DBG("conn %p", conn
);
511 INIT_LIST_HEAD(&del
.list
);
515 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
518 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
519 sk
->sk_type
!= SOCK_STREAM
) {
524 if (sk
->sk_state
== BT_CONNECT
) {
525 if (l2cap_check_security(sk
) &&
526 __l2cap_no_conn_pending(sk
)) {
527 struct l2cap_conn_req req
;
529 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
531 && l2cap_pi(sk
)->conf_state
&
532 L2CAP_CONF_STATE2_DEVICE
) {
533 tmp1
= kzalloc(sizeof(struct srej_list
),
536 list_add_tail(&tmp1
->list
, &del
.list
);
541 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
542 req
.psm
= l2cap_pi(sk
)->psm
;
544 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
545 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
547 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
548 L2CAP_CONN_REQ
, sizeof(req
), &req
);
550 } else if (sk
->sk_state
== BT_CONNECT2
) {
551 struct l2cap_conn_rsp rsp
;
553 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
554 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
556 if (l2cap_check_security(sk
)) {
557 if (bt_sk(sk
)->defer_setup
) {
558 struct sock
*parent
= bt_sk(sk
)->parent
;
559 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
560 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
561 parent
->sk_data_ready(parent
, 0);
564 sk
->sk_state
= BT_CONFIG
;
565 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
566 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
569 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
570 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
573 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
574 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
576 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
577 rsp
.result
!= L2CAP_CR_SUCCESS
) {
582 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
583 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
584 l2cap_build_conf_req(sk
, buf
), buf
);
585 l2cap_pi(sk
)->num_conf_req
++;
591 read_unlock(&l
->lock
);
593 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
594 bh_lock_sock(tmp1
->sk
);
595 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
596 bh_unlock_sock(tmp1
->sk
);
597 list_del(&tmp1
->list
);
602 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
604 struct l2cap_chan_list
*l
= &conn
->chan_list
;
607 BT_DBG("conn %p", conn
);
611 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
614 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
615 sk
->sk_type
!= SOCK_STREAM
) {
616 l2cap_sock_clear_timer(sk
);
617 sk
->sk_state
= BT_CONNECTED
;
618 sk
->sk_state_change(sk
);
619 } else if (sk
->sk_state
== BT_CONNECT
)
625 read_unlock(&l
->lock
);
628 /* Notify sockets that we cannot guaranty reliability anymore */
629 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
631 struct l2cap_chan_list
*l
= &conn
->chan_list
;
634 BT_DBG("conn %p", conn
);
638 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
639 if (l2cap_pi(sk
)->force_reliable
)
643 read_unlock(&l
->lock
);
646 static void l2cap_info_timeout(unsigned long arg
)
648 struct l2cap_conn
*conn
= (void *) arg
;
650 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
651 conn
->info_ident
= 0;
653 l2cap_conn_start(conn
);
656 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
658 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
663 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
667 hcon
->l2cap_data
= conn
;
670 BT_DBG("hcon %p conn %p", hcon
, conn
);
672 conn
->mtu
= hcon
->hdev
->acl_mtu
;
673 conn
->src
= &hcon
->hdev
->bdaddr
;
674 conn
->dst
= &hcon
->dst
;
678 spin_lock_init(&conn
->lock
);
679 rwlock_init(&conn
->chan_list
.lock
);
681 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
682 (unsigned long) conn
);
684 conn
->disc_reason
= 0x13;
689 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
691 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
697 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
699 kfree_skb(conn
->rx_skb
);
702 while ((sk
= conn
->chan_list
.head
)) {
704 l2cap_chan_del(sk
, err
);
709 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
710 del_timer_sync(&conn
->info_timer
);
712 hcon
->l2cap_data
= NULL
;
716 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
718 struct l2cap_chan_list
*l
= &conn
->chan_list
;
719 write_lock_bh(&l
->lock
);
720 __l2cap_chan_add(conn
, sk
, parent
);
721 write_unlock_bh(&l
->lock
);
724 /* ---- Socket interface ---- */
725 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
728 struct hlist_node
*node
;
729 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
730 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
737 /* Find socket with psm and source bdaddr.
738 * Returns closest match.
740 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
742 struct sock
*sk
= NULL
, *sk1
= NULL
;
743 struct hlist_node
*node
;
745 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
746 if (state
&& sk
->sk_state
!= state
)
749 if (l2cap_pi(sk
)->psm
== psm
) {
751 if (!bacmp(&bt_sk(sk
)->src
, src
))
755 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
759 return node
? sk
: sk1
;
762 /* Find socket with given address (psm, src).
763 * Returns locked socket */
764 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
767 read_lock(&l2cap_sk_list
.lock
);
768 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
771 read_unlock(&l2cap_sk_list
.lock
);
775 static void l2cap_sock_destruct(struct sock
*sk
)
779 skb_queue_purge(&sk
->sk_receive_queue
);
780 skb_queue_purge(&sk
->sk_write_queue
);
783 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
787 BT_DBG("parent %p", parent
);
789 /* Close not yet accepted channels */
790 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
791 l2cap_sock_close(sk
);
793 parent
->sk_state
= BT_CLOSED
;
794 sock_set_flag(parent
, SOCK_ZAPPED
);
797 /* Kill socket (only if zapped and orphan)
798 * Must be called on unlocked socket.
800 static void l2cap_sock_kill(struct sock
*sk
)
802 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
805 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
807 /* Kill poor orphan */
808 bt_sock_unlink(&l2cap_sk_list
, sk
);
809 sock_set_flag(sk
, SOCK_DEAD
);
813 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
815 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
817 switch (sk
->sk_state
) {
819 l2cap_sock_cleanup_listen(sk
);
824 if (sk
->sk_type
== SOCK_SEQPACKET
||
825 sk
->sk_type
== SOCK_STREAM
) {
826 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
828 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
829 l2cap_send_disconn_req(conn
, sk
, reason
);
831 l2cap_chan_del(sk
, reason
);
835 if (sk
->sk_type
== SOCK_SEQPACKET
||
836 sk
->sk_type
== SOCK_STREAM
) {
837 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
838 struct l2cap_conn_rsp rsp
;
841 if (bt_sk(sk
)->defer_setup
)
842 result
= L2CAP_CR_SEC_BLOCK
;
844 result
= L2CAP_CR_BAD_PSM
;
846 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
847 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
848 rsp
.result
= cpu_to_le16(result
);
849 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
850 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
851 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
853 l2cap_chan_del(sk
, reason
);
858 l2cap_chan_del(sk
, reason
);
862 sock_set_flag(sk
, SOCK_ZAPPED
);
867 /* Must be called on unlocked socket. */
868 static void l2cap_sock_close(struct sock
*sk
)
870 l2cap_sock_clear_timer(sk
);
872 __l2cap_sock_close(sk
, ECONNRESET
);
877 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
879 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
884 sk
->sk_type
= parent
->sk_type
;
885 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
887 pi
->imtu
= l2cap_pi(parent
)->imtu
;
888 pi
->omtu
= l2cap_pi(parent
)->omtu
;
889 pi
->conf_state
= l2cap_pi(parent
)->conf_state
;
890 pi
->mode
= l2cap_pi(parent
)->mode
;
891 pi
->fcs
= l2cap_pi(parent
)->fcs
;
892 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
893 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
894 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
895 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
896 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
898 pi
->imtu
= L2CAP_DEFAULT_MTU
;
900 if (enable_ertm
&& sk
->sk_type
== SOCK_STREAM
) {
901 pi
->mode
= L2CAP_MODE_ERTM
;
902 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
904 pi
->mode
= L2CAP_MODE_BASIC
;
906 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
907 pi
->fcs
= L2CAP_FCS_CRC16
;
908 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
909 pi
->sec_level
= BT_SECURITY_LOW
;
911 pi
->force_reliable
= 0;
914 /* Default config options */
916 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
917 skb_queue_head_init(TX_QUEUE(sk
));
918 skb_queue_head_init(SREJ_QUEUE(sk
));
919 skb_queue_head_init(BUSY_QUEUE(sk
));
920 INIT_LIST_HEAD(SREJ_LIST(sk
));
923 static struct proto l2cap_proto
= {
925 .owner
= THIS_MODULE
,
926 .obj_size
= sizeof(struct l2cap_pinfo
)
929 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
933 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
937 sock_init_data(sock
, sk
);
938 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
940 sk
->sk_destruct
= l2cap_sock_destruct
;
941 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
943 sock_reset_flag(sk
, SOCK_ZAPPED
);
945 sk
->sk_protocol
= proto
;
946 sk
->sk_state
= BT_OPEN
;
948 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
950 bt_sock_link(&l2cap_sk_list
, sk
);
954 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
959 BT_DBG("sock %p", sock
);
961 sock
->state
= SS_UNCONNECTED
;
963 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
964 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
965 return -ESOCKTNOSUPPORT
;
967 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
970 sock
->ops
= &l2cap_sock_ops
;
972 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
976 l2cap_sock_init(sk
, NULL
);
980 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
982 struct sock
*sk
= sock
->sk
;
983 struct sockaddr_l2 la
;
988 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
991 memset(&la
, 0, sizeof(la
));
992 len
= min_t(unsigned int, sizeof(la
), alen
);
993 memcpy(&la
, addr
, len
);
1000 if (sk
->sk_state
!= BT_OPEN
) {
1005 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
1006 !capable(CAP_NET_BIND_SERVICE
)) {
1011 write_lock_bh(&l2cap_sk_list
.lock
);
1013 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
1016 /* Save source address */
1017 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
1018 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1019 l2cap_pi(sk
)->sport
= la
.l2_psm
;
1020 sk
->sk_state
= BT_BOUND
;
1022 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
1023 __le16_to_cpu(la
.l2_psm
) == 0x0003)
1024 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1027 write_unlock_bh(&l2cap_sk_list
.lock
);
1034 static int l2cap_do_connect(struct sock
*sk
)
1036 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1037 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1038 struct l2cap_conn
*conn
;
1039 struct hci_conn
*hcon
;
1040 struct hci_dev
*hdev
;
1044 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1047 hdev
= hci_get_route(dst
, src
);
1049 return -EHOSTUNREACH
;
1051 hci_dev_lock_bh(hdev
);
1055 if (sk
->sk_type
== SOCK_RAW
) {
1056 switch (l2cap_pi(sk
)->sec_level
) {
1057 case BT_SECURITY_HIGH
:
1058 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
1060 case BT_SECURITY_MEDIUM
:
1061 auth_type
= HCI_AT_DEDICATED_BONDING
;
1064 auth_type
= HCI_AT_NO_BONDING
;
1067 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
1068 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
1069 auth_type
= HCI_AT_NO_BONDING_MITM
;
1071 auth_type
= HCI_AT_NO_BONDING
;
1073 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
1074 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1076 switch (l2cap_pi(sk
)->sec_level
) {
1077 case BT_SECURITY_HIGH
:
1078 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
1080 case BT_SECURITY_MEDIUM
:
1081 auth_type
= HCI_AT_GENERAL_BONDING
;
1084 auth_type
= HCI_AT_NO_BONDING
;
1089 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1090 l2cap_pi(sk
)->sec_level
, auth_type
);
1094 conn
= l2cap_conn_add(hcon
, 0);
1102 /* Update source addr of the socket */
1103 bacpy(src
, conn
->src
);
1105 l2cap_chan_add(conn
, sk
, NULL
);
1107 sk
->sk_state
= BT_CONNECT
;
1108 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1110 if (hcon
->state
== BT_CONNECTED
) {
1111 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1112 sk
->sk_type
!= SOCK_STREAM
) {
1113 l2cap_sock_clear_timer(sk
);
1114 sk
->sk_state
= BT_CONNECTED
;
1120 hci_dev_unlock_bh(hdev
);
1125 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1127 struct sock
*sk
= sock
->sk
;
1128 struct sockaddr_l2 la
;
1131 BT_DBG("sk %p", sk
);
1133 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1134 addr
->sa_family
!= AF_BLUETOOTH
)
1137 memset(&la
, 0, sizeof(la
));
1138 len
= min_t(unsigned int, sizeof(la
), alen
);
1139 memcpy(&la
, addr
, len
);
1146 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1152 switch (l2cap_pi(sk
)->mode
) {
1153 case L2CAP_MODE_BASIC
:
1155 case L2CAP_MODE_ERTM
:
1156 case L2CAP_MODE_STREAMING
:
1165 switch (sk
->sk_state
) {
1169 /* Already connecting */
1173 /* Already connected */
1187 /* Set destination address and psm */
1188 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1189 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1191 err
= l2cap_do_connect(sk
);
1196 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1197 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1203 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1205 struct sock
*sk
= sock
->sk
;
1208 BT_DBG("sk %p backlog %d", sk
, backlog
);
1212 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1213 || sk
->sk_state
!= BT_BOUND
) {
1218 switch (l2cap_pi(sk
)->mode
) {
1219 case L2CAP_MODE_BASIC
:
1221 case L2CAP_MODE_ERTM
:
1222 case L2CAP_MODE_STREAMING
:
1231 if (!l2cap_pi(sk
)->psm
) {
1232 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1237 write_lock_bh(&l2cap_sk_list
.lock
);
1239 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1240 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1241 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1242 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1247 write_unlock_bh(&l2cap_sk_list
.lock
);
1253 sk
->sk_max_ack_backlog
= backlog
;
1254 sk
->sk_ack_backlog
= 0;
1255 sk
->sk_state
= BT_LISTEN
;
1262 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1264 DECLARE_WAITQUEUE(wait
, current
);
1265 struct sock
*sk
= sock
->sk
, *nsk
;
1269 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1271 if (sk
->sk_state
!= BT_LISTEN
) {
1276 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1278 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1280 /* Wait for an incoming connection. (wake-one). */
1281 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1282 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1283 set_current_state(TASK_INTERRUPTIBLE
);
1290 timeo
= schedule_timeout(timeo
);
1291 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1293 if (sk
->sk_state
!= BT_LISTEN
) {
1298 if (signal_pending(current
)) {
1299 err
= sock_intr_errno(timeo
);
1303 set_current_state(TASK_RUNNING
);
1304 remove_wait_queue(sk_sleep(sk
), &wait
);
1309 newsock
->state
= SS_CONNECTED
;
1311 BT_DBG("new socket %p", nsk
);
1318 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1320 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1321 struct sock
*sk
= sock
->sk
;
1323 BT_DBG("sock %p, sk %p", sock
, sk
);
1325 addr
->sa_family
= AF_BLUETOOTH
;
1326 *len
= sizeof(struct sockaddr_l2
);
1329 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1330 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1331 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1333 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1334 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1335 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1341 static int __l2cap_wait_ack(struct sock
*sk
)
1343 DECLARE_WAITQUEUE(wait
, current
);
1347 add_wait_queue(sk_sleep(sk
), &wait
);
1348 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1349 set_current_state(TASK_INTERRUPTIBLE
);
1354 if (signal_pending(current
)) {
1355 err
= sock_intr_errno(timeo
);
1360 timeo
= schedule_timeout(timeo
);
1363 err
= sock_error(sk
);
1367 set_current_state(TASK_RUNNING
);
1368 remove_wait_queue(sk_sleep(sk
), &wait
);
1372 static void l2cap_monitor_timeout(unsigned long arg
)
1374 struct sock
*sk
= (void *) arg
;
1376 BT_DBG("sk %p", sk
);
1379 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1380 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1385 l2cap_pi(sk
)->retry_count
++;
1386 __mod_monitor_timer();
1388 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1392 static void l2cap_retrans_timeout(unsigned long arg
)
1394 struct sock
*sk
= (void *) arg
;
1396 BT_DBG("sk %p", sk
);
1399 l2cap_pi(sk
)->retry_count
= 1;
1400 __mod_monitor_timer();
1402 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1404 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1408 static void l2cap_drop_acked_frames(struct sock
*sk
)
1410 struct sk_buff
*skb
;
1412 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1413 l2cap_pi(sk
)->unacked_frames
) {
1414 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1417 skb
= skb_dequeue(TX_QUEUE(sk
));
1420 l2cap_pi(sk
)->unacked_frames
--;
1423 if (!l2cap_pi(sk
)->unacked_frames
)
1424 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1427 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1429 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1431 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1433 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1436 static void l2cap_streaming_send(struct sock
*sk
)
1438 struct sk_buff
*skb
, *tx_skb
;
1439 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1442 while ((skb
= sk
->sk_send_head
)) {
1443 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1445 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1446 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1447 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1449 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1450 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1451 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1454 l2cap_do_send(sk
, tx_skb
);
1456 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1458 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1459 sk
->sk_send_head
= NULL
;
1461 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1463 skb
= skb_dequeue(TX_QUEUE(sk
));
1468 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1470 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1471 struct sk_buff
*skb
, *tx_skb
;
1474 skb
= skb_peek(TX_QUEUE(sk
));
1479 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1482 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1485 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1487 if (pi
->remote_max_tx
&&
1488 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1489 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1493 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1494 bt_cb(skb
)->retries
++;
1495 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1497 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1498 control
|= L2CAP_CTRL_FINAL
;
1499 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1502 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1503 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1505 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1507 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1508 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1509 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1512 l2cap_do_send(sk
, tx_skb
);
1515 static int l2cap_ertm_send(struct sock
*sk
)
1517 struct sk_buff
*skb
, *tx_skb
;
1518 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1522 if (sk
->sk_state
!= BT_CONNECTED
)
1525 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1527 if (pi
->remote_max_tx
&&
1528 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1529 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1533 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1535 bt_cb(skb
)->retries
++;
1537 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1538 control
&= L2CAP_CTRL_SAR
;
1540 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1541 control
|= L2CAP_CTRL_FINAL
;
1542 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1544 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1545 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1546 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1549 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1550 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1551 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1554 l2cap_do_send(sk
, tx_skb
);
1556 __mod_retrans_timer();
1558 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1559 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1561 pi
->unacked_frames
++;
1564 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1565 sk
->sk_send_head
= NULL
;
1567 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1575 static int l2cap_retransmit_frames(struct sock
*sk
)
1577 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1580 if (!skb_queue_empty(TX_QUEUE(sk
)))
1581 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1583 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1584 ret
= l2cap_ertm_send(sk
);
1588 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1590 struct sock
*sk
= (struct sock
*)pi
;
1593 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1595 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1596 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1597 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1598 l2cap_send_sframe(pi
, control
);
1602 if (l2cap_ertm_send(sk
) > 0)
1605 control
|= L2CAP_SUPER_RCV_READY
;
1606 l2cap_send_sframe(pi
, control
);
1609 static void l2cap_send_srejtail(struct sock
*sk
)
1611 struct srej_list
*tail
;
1614 control
= L2CAP_SUPER_SELECT_REJECT
;
1615 control
|= L2CAP_CTRL_FINAL
;
1617 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1618 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1620 l2cap_send_sframe(l2cap_pi(sk
), control
);
1623 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1625 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1626 struct sk_buff
**frag
;
1629 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1635 /* Continuation fragments (no L2CAP header) */
1636 frag
= &skb_shinfo(skb
)->frag_list
;
1638 count
= min_t(unsigned int, conn
->mtu
, len
);
1640 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1643 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1649 frag
= &(*frag
)->next
;
1655 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1657 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1658 struct sk_buff
*skb
;
1659 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1660 struct l2cap_hdr
*lh
;
1662 BT_DBG("sk %p len %d", sk
, (int)len
);
1664 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1665 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1666 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1668 return ERR_PTR(-ENOMEM
);
1670 /* Create L2CAP header */
1671 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1672 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1673 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1674 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1676 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1677 if (unlikely(err
< 0)) {
1679 return ERR_PTR(err
);
1684 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1686 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1687 struct sk_buff
*skb
;
1688 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1689 struct l2cap_hdr
*lh
;
1691 BT_DBG("sk %p len %d", sk
, (int)len
);
1693 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1694 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1695 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1697 return ERR_PTR(-ENOMEM
);
1699 /* Create L2CAP header */
1700 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1701 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1702 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1704 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1705 if (unlikely(err
< 0)) {
1707 return ERR_PTR(err
);
1712 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1714 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1715 struct sk_buff
*skb
;
1716 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1717 struct l2cap_hdr
*lh
;
1719 BT_DBG("sk %p len %d", sk
, (int)len
);
1722 return ERR_PTR(-ENOTCONN
);
1727 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1730 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1731 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1732 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1734 return ERR_PTR(-ENOMEM
);
1736 /* Create L2CAP header */
1737 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1738 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1739 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1740 put_unaligned_le16(control
, skb_put(skb
, 2));
1742 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1744 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1745 if (unlikely(err
< 0)) {
1747 return ERR_PTR(err
);
1750 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1751 put_unaligned_le16(0, skb_put(skb
, 2));
1753 bt_cb(skb
)->retries
= 0;
1757 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1759 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1760 struct sk_buff
*skb
;
1761 struct sk_buff_head sar_queue
;
1765 skb_queue_head_init(&sar_queue
);
1766 control
= L2CAP_SDU_START
;
1767 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1769 return PTR_ERR(skb
);
1771 __skb_queue_tail(&sar_queue
, skb
);
1772 len
-= pi
->remote_mps
;
1773 size
+= pi
->remote_mps
;
1778 if (len
> pi
->remote_mps
) {
1779 control
= L2CAP_SDU_CONTINUE
;
1780 buflen
= pi
->remote_mps
;
1782 control
= L2CAP_SDU_END
;
1786 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1788 skb_queue_purge(&sar_queue
);
1789 return PTR_ERR(skb
);
1792 __skb_queue_tail(&sar_queue
, skb
);
1796 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1797 if (sk
->sk_send_head
== NULL
)
1798 sk
->sk_send_head
= sar_queue
.next
;
/* l2cap_sock_sendmsg(): sendmsg entry point for L2CAP sockets.
 * Dispatches on channel mode: SOCK_DGRAM builds a connectionless PDU,
 * BASIC mode builds a single PDU bounded by omtu, and ERTM/STREAMING
 * build one unsegmented I-frame (SDU <= remote_mps) or SAR-segment the
 * SDU onto the TX queue, then push via l2cap_streaming_send() or
 * l2cap_ertm_send().
 * NOTE(review): this extraction dropped interior lines (locking, error
 * paths, the switch header); the fragments below are kept verbatim.
 */
1803 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1805 struct sock
*sk
= sock
->sk
;
1806 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1807 struct sk_buff
*skb
;
1811 BT_DBG("sock %p, sk %p", sock
, sk
);
/* Bail out early on a pending socket error. */
1813 err
= sock_error(sk
);
/* L2CAP has no out-of-band data. */
1817 if (msg
->msg_flags
& MSG_OOB
)
1822 if (sk
->sk_state
!= BT_CONNECTED
) {
1827 /* Connectionless channel */
1828 if (sk
->sk_type
== SOCK_DGRAM
) {
1829 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1833 l2cap_do_send(sk
, skb
);
1840 case L2CAP_MODE_BASIC
:
1841 /* Check outgoing MTU */
1842 if (len
> pi
->omtu
) {
1847 /* Create a basic PDU */
1848 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1854 l2cap_do_send(sk
, skb
);
1858 case L2CAP_MODE_ERTM
:
1859 case L2CAP_MODE_STREAMING
:
1860 /* Entire SDU fits into one PDU */
1861 if (len
<= pi
->remote_mps
) {
1862 control
= L2CAP_SDU_UNSEGMENTED
;
1863 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1868 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1870 if (sk
->sk_send_head
== NULL
)
1871 sk
->sk_send_head
= skb
;
1874 /* Segment SDU into multiples PDUs */
1875 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1880 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1881 l2cap_streaming_send(sk
);
/* NOTE(review): BUG on the next condition — the second test uses
 * logical '&&' where bitwise '&' is required, so
 * "pi->conn_state && L2CAP_CONN_WAIT_F" is true whenever conn_state
 * is non-zero. It should read
 *   (pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
 *   (pi->conn_state & L2CAP_CONN_WAIT_F)
 * This was corrected in later upstream kernels. */
1883 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&&
1884 pi
->conn_state
&& L2CAP_CONN_WAIT_F
) {
1888 err
= l2cap_ertm_send(sk
);
/* Unknown/unsupported channel mode: log and fall through to error. */
1896 BT_DBG("bad state %1.1x", pi
->mode
);
1905 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1907 struct sock
*sk
= sock
->sk
;
1911 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1912 struct l2cap_conn_rsp rsp
;
1913 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1916 sk
->sk_state
= BT_CONFIG
;
1918 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1919 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1920 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1921 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1922 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1923 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1925 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) {
1930 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1931 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1932 l2cap_build_conf_req(sk
, buf
), buf
);
1933 l2cap_pi(sk
)->num_conf_req
++;
1941 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1944 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1946 struct sock
*sk
= sock
->sk
;
1947 struct l2cap_options opts
;
1951 BT_DBG("sk %p", sk
);
1957 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1958 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1959 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1960 opts
.mode
= l2cap_pi(sk
)->mode
;
1961 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1962 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1963 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1965 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1966 if (copy_from_user((char *) &opts
, optval
, len
)) {
1971 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1976 l2cap_pi(sk
)->mode
= opts
.mode
;
1977 switch (l2cap_pi(sk
)->mode
) {
1978 case L2CAP_MODE_BASIC
:
1979 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
1981 case L2CAP_MODE_ERTM
:
1982 case L2CAP_MODE_STREAMING
:
1991 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1992 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1993 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1994 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1995 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1999 if (get_user(opt
, (u32 __user
*) optval
)) {
2004 if (opt
& L2CAP_LM_AUTH
)
2005 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
2006 if (opt
& L2CAP_LM_ENCRYPT
)
2007 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
2008 if (opt
& L2CAP_LM_SECURE
)
2009 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
2011 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
2012 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
2024 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
2026 struct sock
*sk
= sock
->sk
;
2027 struct bt_security sec
;
2031 BT_DBG("sk %p", sk
);
2033 if (level
== SOL_L2CAP
)
2034 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
2036 if (level
!= SOL_BLUETOOTH
)
2037 return -ENOPROTOOPT
;
2043 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2044 && sk
->sk_type
!= SOCK_RAW
) {
2049 sec
.level
= BT_SECURITY_LOW
;
2051 len
= min_t(unsigned int, sizeof(sec
), optlen
);
2052 if (copy_from_user((char *) &sec
, optval
, len
)) {
2057 if (sec
.level
< BT_SECURITY_LOW
||
2058 sec
.level
> BT_SECURITY_HIGH
) {
2063 l2cap_pi(sk
)->sec_level
= sec
.level
;
2066 case BT_DEFER_SETUP
:
2067 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2072 if (get_user(opt
, (u32 __user
*) optval
)) {
2077 bt_sk(sk
)->defer_setup
= opt
;
2089 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2091 struct sock
*sk
= sock
->sk
;
2092 struct l2cap_options opts
;
2093 struct l2cap_conninfo cinfo
;
2097 BT_DBG("sk %p", sk
);
2099 if (get_user(len
, optlen
))
2106 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2107 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2108 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2109 opts
.mode
= l2cap_pi(sk
)->mode
;
2110 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2111 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2112 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2114 len
= min_t(unsigned int, len
, sizeof(opts
));
2115 if (copy_to_user(optval
, (char *) &opts
, len
))
2121 switch (l2cap_pi(sk
)->sec_level
) {
2122 case BT_SECURITY_LOW
:
2123 opt
= L2CAP_LM_AUTH
;
2125 case BT_SECURITY_MEDIUM
:
2126 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2128 case BT_SECURITY_HIGH
:
2129 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2137 if (l2cap_pi(sk
)->role_switch
)
2138 opt
|= L2CAP_LM_MASTER
;
2140 if (l2cap_pi(sk
)->force_reliable
)
2141 opt
|= L2CAP_LM_RELIABLE
;
2143 if (put_user(opt
, (u32 __user
*) optval
))
2147 case L2CAP_CONNINFO
:
2148 if (sk
->sk_state
!= BT_CONNECTED
&&
2149 !(sk
->sk_state
== BT_CONNECT2
&&
2150 bt_sk(sk
)->defer_setup
)) {
2155 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2156 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2158 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2159 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2173 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2175 struct sock
*sk
= sock
->sk
;
2176 struct bt_security sec
;
2179 BT_DBG("sk %p", sk
);
2181 if (level
== SOL_L2CAP
)
2182 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2184 if (level
!= SOL_BLUETOOTH
)
2185 return -ENOPROTOOPT
;
2187 if (get_user(len
, optlen
))
2194 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2195 && sk
->sk_type
!= SOCK_RAW
) {
2200 sec
.level
= l2cap_pi(sk
)->sec_level
;
2202 len
= min_t(unsigned int, len
, sizeof(sec
));
2203 if (copy_to_user(optval
, (char *) &sec
, len
))
2208 case BT_DEFER_SETUP
:
2209 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2214 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2228 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2230 struct sock
*sk
= sock
->sk
;
2233 BT_DBG("sock %p, sk %p", sock
, sk
);
2239 if (!sk
->sk_shutdown
) {
2240 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2241 err
= __l2cap_wait_ack(sk
);
2243 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2244 l2cap_sock_clear_timer(sk
);
2245 __l2cap_sock_close(sk
, 0);
2247 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2248 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2252 if (!err
&& sk
->sk_err
)
2259 static int l2cap_sock_release(struct socket
*sock
)
2261 struct sock
*sk
= sock
->sk
;
2264 BT_DBG("sock %p, sk %p", sock
, sk
);
2269 err
= l2cap_sock_shutdown(sock
, 2);
2272 l2cap_sock_kill(sk
);
2276 static void l2cap_chan_ready(struct sock
*sk
)
2278 struct sock
*parent
= bt_sk(sk
)->parent
;
2280 BT_DBG("sk %p, parent %p", sk
, parent
);
2282 l2cap_pi(sk
)->conf_state
= 0;
2283 l2cap_sock_clear_timer(sk
);
2286 /* Outgoing channel.
2287 * Wake up socket sleeping on connect.
2289 sk
->sk_state
= BT_CONNECTED
;
2290 sk
->sk_state_change(sk
);
2292 /* Incoming channel.
2293 * Wake up socket sleeping on accept.
2295 parent
->sk_data_ready(parent
, 0);
2299 /* Copy frame to all raw sockets on that connection */
2300 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2302 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2303 struct sk_buff
*nskb
;
2306 BT_DBG("conn %p", conn
);
2308 read_lock(&l
->lock
);
2309 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2310 if (sk
->sk_type
!= SOCK_RAW
)
2313 /* Don't send frame to the socket it came from */
2316 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2320 if (sock_queue_rcv_skb(sk
, nskb
))
2323 read_unlock(&l
->lock
);
2326 /* ---- L2CAP signalling commands ---- */
2327 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2328 u8 code
, u8 ident
, u16 dlen
, void *data
)
2330 struct sk_buff
*skb
, **frag
;
2331 struct l2cap_cmd_hdr
*cmd
;
2332 struct l2cap_hdr
*lh
;
2335 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2336 conn
, code
, ident
, dlen
);
2338 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2339 count
= min_t(unsigned int, conn
->mtu
, len
);
2341 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2345 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2346 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2347 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2349 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2352 cmd
->len
= cpu_to_le16(dlen
);
2355 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2356 memcpy(skb_put(skb
, count
), data
, count
);
2362 /* Continuation fragments (no L2CAP header) */
2363 frag
= &skb_shinfo(skb
)->frag_list
;
2365 count
= min_t(unsigned int, conn
->mtu
, len
);
2367 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2371 memcpy(skb_put(*frag
, count
), data
, count
);
2376 frag
= &(*frag
)->next
;
2386 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2388 struct l2cap_conf_opt
*opt
= *ptr
;
2391 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2399 *val
= *((u8
*) opt
->val
);
2403 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2407 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2411 *val
= (unsigned long) opt
->val
;
2415 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2419 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2421 struct l2cap_conf_opt
*opt
= *ptr
;
2423 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2430 *((u8
*) opt
->val
) = val
;
2434 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2438 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2442 memcpy(opt
->val
, (void *) val
, len
);
2446 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2449 static void l2cap_ack_timeout(unsigned long arg
)
2451 struct sock
*sk
= (void *) arg
;
2454 l2cap_send_ack(l2cap_pi(sk
));
2458 static inline void l2cap_ertm_init(struct sock
*sk
)
2460 l2cap_pi(sk
)->expected_ack_seq
= 0;
2461 l2cap_pi(sk
)->unacked_frames
= 0;
2462 l2cap_pi(sk
)->buffer_seq
= 0;
2463 l2cap_pi(sk
)->num_acked
= 0;
2464 l2cap_pi(sk
)->frames_sent
= 0;
2466 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2467 l2cap_retrans_timeout
, (unsigned long) sk
);
2468 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2469 l2cap_monitor_timeout
, (unsigned long) sk
);
2470 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2471 l2cap_ack_timeout
, (unsigned long) sk
);
2473 __skb_queue_head_init(SREJ_QUEUE(sk
));
2474 __skb_queue_head_init(BUSY_QUEUE(sk
));
2476 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2478 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
2481 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2484 case L2CAP_MODE_STREAMING
:
2485 case L2CAP_MODE_ERTM
:
2486 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2490 return L2CAP_MODE_BASIC
;
2494 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2496 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2497 struct l2cap_conf_req
*req
= data
;
2498 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2499 void *ptr
= req
->data
;
2501 BT_DBG("sk %p", sk
);
2503 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2507 case L2CAP_MODE_STREAMING
:
2508 case L2CAP_MODE_ERTM
:
2509 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
2514 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2520 case L2CAP_MODE_BASIC
:
2521 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2522 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2524 rfc
.mode
= L2CAP_MODE_BASIC
;
2526 rfc
.max_transmit
= 0;
2527 rfc
.retrans_timeout
= 0;
2528 rfc
.monitor_timeout
= 0;
2529 rfc
.max_pdu_size
= 0;
2533 case L2CAP_MODE_ERTM
:
2534 rfc
.mode
= L2CAP_MODE_ERTM
;
2535 rfc
.txwin_size
= pi
->tx_win
;
2536 rfc
.max_transmit
= pi
->max_tx
;
2537 rfc
.retrans_timeout
= 0;
2538 rfc
.monitor_timeout
= 0;
2539 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2540 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2541 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2543 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2546 if (pi
->fcs
== L2CAP_FCS_NONE
||
2547 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2548 pi
->fcs
= L2CAP_FCS_NONE
;
2549 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2553 case L2CAP_MODE_STREAMING
:
2554 rfc
.mode
= L2CAP_MODE_STREAMING
;
2556 rfc
.max_transmit
= 0;
2557 rfc
.retrans_timeout
= 0;
2558 rfc
.monitor_timeout
= 0;
2559 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2560 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2561 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2563 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2566 if (pi
->fcs
== L2CAP_FCS_NONE
||
2567 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2568 pi
->fcs
= L2CAP_FCS_NONE
;
2569 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2574 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2575 (unsigned long) &rfc
);
2577 /* FIXME: Need actual value of the flush timeout */
2578 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2579 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2581 req
->dcid
= cpu_to_le16(pi
->dcid
);
2582 req
->flags
= cpu_to_le16(0);
2587 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2589 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2590 struct l2cap_conf_rsp
*rsp
= data
;
2591 void *ptr
= rsp
->data
;
2592 void *req
= pi
->conf_req
;
2593 int len
= pi
->conf_len
;
2594 int type
, hint
, olen
;
2596 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2597 u16 mtu
= L2CAP_DEFAULT_MTU
;
2598 u16 result
= L2CAP_CONF_SUCCESS
;
2600 BT_DBG("sk %p", sk
);
2602 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2603 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2605 hint
= type
& L2CAP_CONF_HINT
;
2606 type
&= L2CAP_CONF_MASK
;
2609 case L2CAP_CONF_MTU
:
2613 case L2CAP_CONF_FLUSH_TO
:
2617 case L2CAP_CONF_QOS
:
2620 case L2CAP_CONF_RFC
:
2621 if (olen
== sizeof(rfc
))
2622 memcpy(&rfc
, (void *) val
, olen
);
2625 case L2CAP_CONF_FCS
:
2626 if (val
== L2CAP_FCS_NONE
)
2627 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2635 result
= L2CAP_CONF_UNKNOWN
;
2636 *((u8
*) ptr
++) = type
;
2641 if (pi
->num_conf_rsp
|| pi
->num_conf_req
> 1)
2645 case L2CAP_MODE_STREAMING
:
2646 case L2CAP_MODE_ERTM
:
2647 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2648 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2649 pi
->conn
->feat_mask
);
2653 if (pi
->mode
!= rfc
.mode
)
2654 return -ECONNREFUSED
;
2660 if (pi
->mode
!= rfc
.mode
) {
2661 result
= L2CAP_CONF_UNACCEPT
;
2662 rfc
.mode
= pi
->mode
;
2664 if (pi
->num_conf_rsp
== 1)
2665 return -ECONNREFUSED
;
2667 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2668 sizeof(rfc
), (unsigned long) &rfc
);
2672 if (result
== L2CAP_CONF_SUCCESS
) {
2673 /* Configure output options and let the other side know
2674 * which ones we don't like. */
2676 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2677 result
= L2CAP_CONF_UNACCEPT
;
2680 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2682 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2685 case L2CAP_MODE_BASIC
:
2686 pi
->fcs
= L2CAP_FCS_NONE
;
2687 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2690 case L2CAP_MODE_ERTM
:
2691 pi
->remote_tx_win
= rfc
.txwin_size
;
2692 pi
->remote_max_tx
= rfc
.max_transmit
;
2693 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2694 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2696 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2698 rfc
.retrans_timeout
=
2699 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2700 rfc
.monitor_timeout
=
2701 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2703 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2705 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2706 sizeof(rfc
), (unsigned long) &rfc
);
2710 case L2CAP_MODE_STREAMING
:
2711 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2712 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2714 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2716 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2718 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2719 sizeof(rfc
), (unsigned long) &rfc
);
2724 result
= L2CAP_CONF_UNACCEPT
;
2726 memset(&rfc
, 0, sizeof(rfc
));
2727 rfc
.mode
= pi
->mode
;
2730 if (result
== L2CAP_CONF_SUCCESS
)
2731 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2733 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2734 rsp
->result
= cpu_to_le16(result
);
2735 rsp
->flags
= cpu_to_le16(0x0000);
2740 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2742 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2743 struct l2cap_conf_req
*req
= data
;
2744 void *ptr
= req
->data
;
2747 struct l2cap_conf_rfc rfc
;
2749 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2751 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2752 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2755 case L2CAP_CONF_MTU
:
2756 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2757 *result
= L2CAP_CONF_UNACCEPT
;
2758 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2761 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2764 case L2CAP_CONF_FLUSH_TO
:
2766 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2770 case L2CAP_CONF_RFC
:
2771 if (olen
== sizeof(rfc
))
2772 memcpy(&rfc
, (void *)val
, olen
);
2774 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2775 rfc
.mode
!= pi
->mode
)
2776 return -ECONNREFUSED
;
2780 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2781 sizeof(rfc
), (unsigned long) &rfc
);
2786 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2787 return -ECONNREFUSED
;
2789 pi
->mode
= rfc
.mode
;
2791 if (*result
== L2CAP_CONF_SUCCESS
) {
2793 case L2CAP_MODE_ERTM
:
2794 pi
->remote_tx_win
= rfc
.txwin_size
;
2795 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2796 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2797 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2799 case L2CAP_MODE_STREAMING
:
2800 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2804 req
->dcid
= cpu_to_le16(pi
->dcid
);
2805 req
->flags
= cpu_to_le16(0x0000);
2810 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2812 struct l2cap_conf_rsp
*rsp
= data
;
2813 void *ptr
= rsp
->data
;
2815 BT_DBG("sk %p", sk
);
2817 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2818 rsp
->result
= cpu_to_le16(result
);
2819 rsp
->flags
= cpu_to_le16(flags
);
2824 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2826 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2829 struct l2cap_conf_rfc rfc
;
2831 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2833 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2836 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2837 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2840 case L2CAP_CONF_RFC
:
2841 if (olen
== sizeof(rfc
))
2842 memcpy(&rfc
, (void *)val
, olen
);
2849 case L2CAP_MODE_ERTM
:
2850 pi
->remote_tx_win
= rfc
.txwin_size
;
2851 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2852 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2853 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2855 case L2CAP_MODE_STREAMING
:
2856 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2860 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2862 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2864 if (rej
->reason
!= 0x0000)
2867 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2868 cmd
->ident
== conn
->info_ident
) {
2869 del_timer(&conn
->info_timer
);
2871 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2872 conn
->info_ident
= 0;
2874 l2cap_conn_start(conn
);
2880 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2882 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2883 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2884 struct l2cap_conn_rsp rsp
;
2885 struct sock
*parent
, *uninitialized_var(sk
);
2886 int result
, status
= L2CAP_CS_NO_INFO
;
2888 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2889 __le16 psm
= req
->psm
;
2891 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2893 /* Check if we have socket listening on psm */
2894 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2896 result
= L2CAP_CR_BAD_PSM
;
2900 /* Check if the ACL is secure enough (if not SDP) */
2901 if (psm
!= cpu_to_le16(0x0001) &&
2902 !hci_conn_check_link_mode(conn
->hcon
)) {
2903 conn
->disc_reason
= 0x05;
2904 result
= L2CAP_CR_SEC_BLOCK
;
2908 result
= L2CAP_CR_NO_MEM
;
2910 /* Check for backlog size */
2911 if (sk_acceptq_is_full(parent
)) {
2912 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2916 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2920 write_lock_bh(&list
->lock
);
2922 /* Check if we already have channel with that dcid */
2923 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2924 write_unlock_bh(&list
->lock
);
2925 sock_set_flag(sk
, SOCK_ZAPPED
);
2926 l2cap_sock_kill(sk
);
2930 hci_conn_hold(conn
->hcon
);
2932 l2cap_sock_init(sk
, parent
);
2933 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2934 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2935 l2cap_pi(sk
)->psm
= psm
;
2936 l2cap_pi(sk
)->dcid
= scid
;
2938 __l2cap_chan_add(conn
, sk
, parent
);
2939 dcid
= l2cap_pi(sk
)->scid
;
2941 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2943 l2cap_pi(sk
)->ident
= cmd
->ident
;
2945 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2946 if (l2cap_check_security(sk
)) {
2947 if (bt_sk(sk
)->defer_setup
) {
2948 sk
->sk_state
= BT_CONNECT2
;
2949 result
= L2CAP_CR_PEND
;
2950 status
= L2CAP_CS_AUTHOR_PEND
;
2951 parent
->sk_data_ready(parent
, 0);
2953 sk
->sk_state
= BT_CONFIG
;
2954 result
= L2CAP_CR_SUCCESS
;
2955 status
= L2CAP_CS_NO_INFO
;
2958 sk
->sk_state
= BT_CONNECT2
;
2959 result
= L2CAP_CR_PEND
;
2960 status
= L2CAP_CS_AUTHEN_PEND
;
2963 sk
->sk_state
= BT_CONNECT2
;
2964 result
= L2CAP_CR_PEND
;
2965 status
= L2CAP_CS_NO_INFO
;
2968 write_unlock_bh(&list
->lock
);
2971 bh_unlock_sock(parent
);
2974 rsp
.scid
= cpu_to_le16(scid
);
2975 rsp
.dcid
= cpu_to_le16(dcid
);
2976 rsp
.result
= cpu_to_le16(result
);
2977 rsp
.status
= cpu_to_le16(status
);
2978 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2980 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2981 struct l2cap_info_req info
;
2982 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2984 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2985 conn
->info_ident
= l2cap_get_ident(conn
);
2987 mod_timer(&conn
->info_timer
, jiffies
+
2988 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2990 l2cap_send_cmd(conn
, conn
->info_ident
,
2991 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2994 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2995 result
== L2CAP_CR_SUCCESS
) {
2997 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2998 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2999 l2cap_build_conf_req(sk
, buf
), buf
);
3000 l2cap_pi(sk
)->num_conf_req
++;
3006 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3008 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3009 u16 scid
, dcid
, result
, status
;
3013 scid
= __le16_to_cpu(rsp
->scid
);
3014 dcid
= __le16_to_cpu(rsp
->dcid
);
3015 result
= __le16_to_cpu(rsp
->result
);
3016 status
= __le16_to_cpu(rsp
->status
);
3018 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
3021 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3025 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
3031 case L2CAP_CR_SUCCESS
:
3032 sk
->sk_state
= BT_CONFIG
;
3033 l2cap_pi(sk
)->ident
= 0;
3034 l2cap_pi(sk
)->dcid
= dcid
;
3035 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
3037 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
3040 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
3042 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3043 l2cap_build_conf_req(sk
, req
), req
);
3044 l2cap_pi(sk
)->num_conf_req
++;
3048 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3052 l2cap_chan_del(sk
, ECONNREFUSED
);
3060 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3062 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3068 dcid
= __le16_to_cpu(req
->dcid
);
3069 flags
= __le16_to_cpu(req
->flags
);
3071 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3073 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3077 if (sk
->sk_state
!= BT_CONFIG
) {
3078 struct l2cap_cmd_rej rej
;
3080 rej
.reason
= cpu_to_le16(0x0002);
3081 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3086 /* Reject if config buffer is too small. */
3087 len
= cmd_len
- sizeof(*req
);
3088 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3089 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3090 l2cap_build_conf_rsp(sk
, rsp
,
3091 L2CAP_CONF_REJECT
, flags
), rsp
);
3096 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3097 l2cap_pi(sk
)->conf_len
+= len
;
3099 if (flags
& 0x0001) {
3100 /* Incomplete config. Send empty response. */
3101 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3102 l2cap_build_conf_rsp(sk
, rsp
,
3103 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3107 /* Complete config. */
3108 len
= l2cap_parse_conf_req(sk
, rsp
);
3110 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3114 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3115 l2cap_pi(sk
)->num_conf_rsp
++;
3117 /* Reset config buffer. */
3118 l2cap_pi(sk
)->conf_len
= 0;
3120 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3123 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3124 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3125 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3126 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3128 sk
->sk_state
= BT_CONNECTED
;
3130 l2cap_pi(sk
)->next_tx_seq
= 0;
3131 l2cap_pi(sk
)->expected_tx_seq
= 0;
3132 __skb_queue_head_init(TX_QUEUE(sk
));
3133 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3134 l2cap_ertm_init(sk
);
3136 l2cap_chan_ready(sk
);
3140 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3142 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3143 l2cap_build_conf_req(sk
, buf
), buf
);
3144 l2cap_pi(sk
)->num_conf_req
++;
3152 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3154 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3155 u16 scid
, flags
, result
;
3157 int len
= cmd
->len
- sizeof(*rsp
);
3159 scid
= __le16_to_cpu(rsp
->scid
);
3160 flags
= __le16_to_cpu(rsp
->flags
);
3161 result
= __le16_to_cpu(rsp
->result
);
3163 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3164 scid
, flags
, result
);
3166 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3171 case L2CAP_CONF_SUCCESS
:
3172 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3175 case L2CAP_CONF_UNACCEPT
:
3176 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3179 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3180 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3184 /* throw out any old stored conf requests */
3185 result
= L2CAP_CONF_SUCCESS
;
3186 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3189 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3193 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3194 L2CAP_CONF_REQ
, len
, req
);
3195 l2cap_pi(sk
)->num_conf_req
++;
3196 if (result
!= L2CAP_CONF_SUCCESS
)
3202 sk
->sk_err
= ECONNRESET
;
3203 l2cap_sock_set_timer(sk
, HZ
* 5);
3204 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3211 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3213 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3214 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3215 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3216 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3218 sk
->sk_state
= BT_CONNECTED
;
3219 l2cap_pi(sk
)->next_tx_seq
= 0;
3220 l2cap_pi(sk
)->expected_tx_seq
= 0;
3221 __skb_queue_head_init(TX_QUEUE(sk
));
3222 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3223 l2cap_ertm_init(sk
);
3225 l2cap_chan_ready(sk
);
3233 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3235 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3236 struct l2cap_disconn_rsp rsp
;
3240 scid
= __le16_to_cpu(req
->scid
);
3241 dcid
= __le16_to_cpu(req
->dcid
);
3243 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3245 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3249 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3250 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3251 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3253 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3255 l2cap_chan_del(sk
, ECONNRESET
);
3258 l2cap_sock_kill(sk
);
3262 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3264 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3268 scid
= __le16_to_cpu(rsp
->scid
);
3269 dcid
= __le16_to_cpu(rsp
->dcid
);
3271 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3273 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3277 l2cap_chan_del(sk
, 0);
3280 l2cap_sock_kill(sk
);
3284 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3286 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3289 type
= __le16_to_cpu(req
->type
);
3291 BT_DBG("type 0x%4.4x", type
);
3293 if (type
== L2CAP_IT_FEAT_MASK
) {
3295 u32 feat_mask
= l2cap_feat_mask
;
3296 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3297 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3298 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3300 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3302 put_unaligned_le32(feat_mask
, rsp
->data
);
3303 l2cap_send_cmd(conn
, cmd
->ident
,
3304 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3305 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3307 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3308 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3309 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3310 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3311 l2cap_send_cmd(conn
, cmd
->ident
,
3312 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3314 struct l2cap_info_rsp rsp
;
3315 rsp
.type
= cpu_to_le16(type
);
3316 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3317 l2cap_send_cmd(conn
, cmd
->ident
,
3318 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3324 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3326 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3329 type
= __le16_to_cpu(rsp
->type
);
3330 result
= __le16_to_cpu(rsp
->result
);
3332 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3334 del_timer(&conn
->info_timer
);
3336 if (type
== L2CAP_IT_FEAT_MASK
) {
3337 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3339 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3340 struct l2cap_info_req req
;
3341 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3343 conn
->info_ident
= l2cap_get_ident(conn
);
3345 l2cap_send_cmd(conn
, conn
->info_ident
,
3346 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3348 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3349 conn
->info_ident
= 0;
3351 l2cap_conn_start(conn
);
3353 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3354 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3355 conn
->info_ident
= 0;
3357 l2cap_conn_start(conn
);
3363 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3365 u8
*data
= skb
->data
;
3367 struct l2cap_cmd_hdr cmd
;
3370 l2cap_raw_recv(conn
, skb
);
3372 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3374 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3375 data
+= L2CAP_CMD_HDR_SIZE
;
3376 len
-= L2CAP_CMD_HDR_SIZE
;
3378 cmd_len
= le16_to_cpu(cmd
.len
);
3380 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3382 if (cmd_len
> len
|| !cmd
.ident
) {
3383 BT_DBG("corrupted command");
3388 case L2CAP_COMMAND_REJ
:
3389 l2cap_command_rej(conn
, &cmd
, data
);
3392 case L2CAP_CONN_REQ
:
3393 err
= l2cap_connect_req(conn
, &cmd
, data
);
3396 case L2CAP_CONN_RSP
:
3397 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3400 case L2CAP_CONF_REQ
:
3401 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3404 case L2CAP_CONF_RSP
:
3405 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3408 case L2CAP_DISCONN_REQ
:
3409 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3412 case L2CAP_DISCONN_RSP
:
3413 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3416 case L2CAP_ECHO_REQ
:
3417 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3420 case L2CAP_ECHO_RSP
:
3423 case L2CAP_INFO_REQ
:
3424 err
= l2cap_information_req(conn
, &cmd
, data
);
3427 case L2CAP_INFO_RSP
:
3428 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3432 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3438 struct l2cap_cmd_rej rej
;
3439 BT_DBG("error %d", err
);
3441 /* FIXME: Map err to a valid reason */
3442 rej
.reason
= cpu_to_le16(0);
3443 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3453 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3455 u16 our_fcs
, rcv_fcs
;
3456 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3458 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3459 skb_trim(skb
, skb
->len
- 2);
3460 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3461 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3463 if (our_fcs
!= rcv_fcs
)
3469 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3471 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3474 pi
->frames_sent
= 0;
3476 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3478 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3479 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3480 l2cap_send_sframe(pi
, control
);
3481 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3484 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3485 l2cap_retransmit_frames(sk
);
3487 l2cap_ertm_send(sk
);
3489 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3490 pi
->frames_sent
== 0) {
3491 control
|= L2CAP_SUPER_RCV_READY
;
3492 l2cap_send_sframe(pi
, control
);
3496 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3498 struct sk_buff
*next_skb
;
3499 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3500 int tx_seq_offset
, next_tx_seq_offset
;
3502 bt_cb(skb
)->tx_seq
= tx_seq
;
3503 bt_cb(skb
)->sar
= sar
;
3505 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3507 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3511 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3512 if (tx_seq_offset
< 0)
3513 tx_seq_offset
+= 64;
3516 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3519 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3520 pi
->buffer_seq
) % 64;
3521 if (next_tx_seq_offset
< 0)
3522 next_tx_seq_offset
+= 64;
3524 if (next_tx_seq_offset
> tx_seq_offset
) {
3525 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3529 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3532 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3534 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3539 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3541 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3542 struct sk_buff
*_skb
;
3545 switch (control
& L2CAP_CTRL_SAR
) {
3546 case L2CAP_SDU_UNSEGMENTED
:
3547 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3550 err
= sock_queue_rcv_skb(sk
, skb
);
3556 case L2CAP_SDU_START
:
3557 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3560 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3562 if (pi
->sdu_len
> pi
->imtu
)
3565 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3569 /* pull sdu_len bytes only after alloc, because of Local Busy
3570 * condition we have to be sure that this will be executed
3571 * only once, i.e., when alloc does not fail */
3574 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3576 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3577 pi
->partial_sdu_len
= skb
->len
;
3580 case L2CAP_SDU_CONTINUE
:
3581 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3587 pi
->partial_sdu_len
+= skb
->len
;
3588 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3591 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3596 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3602 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3603 pi
->partial_sdu_len
+= skb
->len
;
3605 if (pi
->partial_sdu_len
> pi
->imtu
)
3608 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3611 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3614 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3616 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3620 err
= sock_queue_rcv_skb(sk
, _skb
);
3623 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3627 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3628 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3642 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3647 static int l2cap_try_push_rx_skb(struct sock
*sk
)
3649 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3650 struct sk_buff
*skb
;
3654 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3655 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3656 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3658 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3662 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3665 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3668 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3669 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3670 l2cap_send_sframe(pi
, control
);
3671 l2cap_pi(sk
)->retry_count
= 1;
3673 del_timer(&pi
->retrans_timer
);
3674 __mod_monitor_timer();
3676 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3679 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3680 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3682 BT_DBG("sk %p, Exit local busy", sk
);
3687 static void l2cap_busy_work(struct work_struct
*work
)
3689 DECLARE_WAITQUEUE(wait
, current
);
3690 struct l2cap_pinfo
*pi
=
3691 container_of(work
, struct l2cap_pinfo
, busy_work
);
3692 struct sock
*sk
= (struct sock
*)pi
;
3693 int n_tries
= 0, timeo
= HZ
/5, err
;
3694 struct sk_buff
*skb
;
3698 add_wait_queue(sk_sleep(sk
), &wait
);
3699 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3700 set_current_state(TASK_INTERRUPTIBLE
);
3702 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3704 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3711 if (signal_pending(current
)) {
3712 err
= sock_intr_errno(timeo
);
3717 timeo
= schedule_timeout(timeo
);
3720 err
= sock_error(sk
);
3724 if (l2cap_try_push_rx_skb(sk
) == 0)
3728 set_current_state(TASK_RUNNING
);
3729 remove_wait_queue(sk_sleep(sk
), &wait
);
3734 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3736 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3739 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3740 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3741 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3742 return l2cap_try_push_rx_skb(sk
);
3747 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3749 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3753 /* Busy Condition */
3754 BT_DBG("sk %p, Enter local busy", sk
);
3756 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3757 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3758 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3760 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3761 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3762 l2cap_send_sframe(pi
, sctrl
);
3764 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3766 del_timer(&pi
->ack_timer
);
3768 queue_work(_busy_wq
, &pi
->busy_work
);
3773 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3775 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3776 struct sk_buff
*_skb
;
3780 * TODO: We have to notify the userland if some data is lost with the
3784 switch (control
& L2CAP_CTRL_SAR
) {
3785 case L2CAP_SDU_UNSEGMENTED
:
3786 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3791 err
= sock_queue_rcv_skb(sk
, skb
);
3797 case L2CAP_SDU_START
:
3798 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3803 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3806 if (pi
->sdu_len
> pi
->imtu
) {
3811 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3817 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3819 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3820 pi
->partial_sdu_len
= skb
->len
;
3824 case L2CAP_SDU_CONTINUE
:
3825 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3828 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3830 pi
->partial_sdu_len
+= skb
->len
;
3831 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3839 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3842 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3844 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3845 pi
->partial_sdu_len
+= skb
->len
;
3847 if (pi
->partial_sdu_len
> pi
->imtu
)
3850 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3851 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3852 err
= sock_queue_rcv_skb(sk
, _skb
);
3867 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3869 struct sk_buff
*skb
;
3872 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3873 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3876 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3877 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3878 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3879 l2cap_pi(sk
)->buffer_seq_srej
=
3880 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3881 tx_seq
= (tx_seq
+ 1) % 64;
3885 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3887 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3888 struct srej_list
*l
, *tmp
;
3891 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3892 if (l
->tx_seq
== tx_seq
) {
3897 control
= L2CAP_SUPER_SELECT_REJECT
;
3898 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3899 l2cap_send_sframe(pi
, control
);
3901 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3905 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3907 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3908 struct srej_list
*new;
3911 while (tx_seq
!= pi
->expected_tx_seq
) {
3912 control
= L2CAP_SUPER_SELECT_REJECT
;
3913 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3914 l2cap_send_sframe(pi
, control
);
3916 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3917 new->tx_seq
= pi
->expected_tx_seq
;
3918 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3919 list_add_tail(&new->list
, SREJ_LIST(sk
));
3921 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3924 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3926 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3927 u8 tx_seq
= __get_txseq(rx_control
);
3928 u8 req_seq
= __get_reqseq(rx_control
);
3929 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3930 int tx_seq_offset
, expected_tx_seq_offset
;
3931 int num_to_ack
= (pi
->tx_win
/6) + 1;
3934 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3937 if (L2CAP_CTRL_FINAL
& rx_control
&&
3938 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3939 del_timer(&pi
->monitor_timer
);
3940 if (pi
->unacked_frames
> 0)
3941 __mod_retrans_timer();
3942 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3945 pi
->expected_ack_seq
= req_seq
;
3946 l2cap_drop_acked_frames(sk
);
3948 if (tx_seq
== pi
->expected_tx_seq
)
3951 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3952 if (tx_seq_offset
< 0)
3953 tx_seq_offset
+= 64;
3955 /* invalid tx_seq */
3956 if (tx_seq_offset
>= pi
->tx_win
) {
3957 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3961 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3964 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3965 struct srej_list
*first
;
3967 first
= list_first_entry(SREJ_LIST(sk
),
3968 struct srej_list
, list
);
3969 if (tx_seq
== first
->tx_seq
) {
3970 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3971 l2cap_check_srej_gap(sk
, tx_seq
);
3973 list_del(&first
->list
);
3976 if (list_empty(SREJ_LIST(sk
))) {
3977 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3978 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3980 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3983 struct srej_list
*l
;
3985 /* duplicated tx_seq */
3986 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3989 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3990 if (l
->tx_seq
== tx_seq
) {
3991 l2cap_resend_srejframe(sk
, tx_seq
);
3995 l2cap_send_srejframe(sk
, tx_seq
);
3998 expected_tx_seq_offset
=
3999 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
4000 if (expected_tx_seq_offset
< 0)
4001 expected_tx_seq_offset
+= 64;
4003 /* duplicated tx_seq */
4004 if (tx_seq_offset
< expected_tx_seq_offset
)
4007 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
4009 BT_DBG("sk %p, Enter SREJ", sk
);
4011 INIT_LIST_HEAD(SREJ_LIST(sk
));
4012 pi
->buffer_seq_srej
= pi
->buffer_seq
;
4014 __skb_queue_head_init(SREJ_QUEUE(sk
));
4015 __skb_queue_head_init(BUSY_QUEUE(sk
));
4016 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
4018 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
4020 l2cap_send_srejframe(sk
, tx_seq
);
4022 del_timer(&pi
->ack_timer
);
4027 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4029 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4030 bt_cb(skb
)->tx_seq
= tx_seq
;
4031 bt_cb(skb
)->sar
= sar
;
4032 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
4036 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
4040 if (rx_control
& L2CAP_CTRL_FINAL
) {
4041 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4042 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4044 l2cap_retransmit_frames(sk
);
4049 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
4050 if (pi
->num_acked
== num_to_ack
- 1)
4060 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
4062 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4064 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
4067 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
4068 l2cap_drop_acked_frames(sk
);
4070 if (rx_control
& L2CAP_CTRL_POLL
) {
4071 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4072 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4073 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4074 (pi
->unacked_frames
> 0))
4075 __mod_retrans_timer();
4077 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4078 l2cap_send_srejtail(sk
);
4080 l2cap_send_i_or_rr_or_rnr(sk
);
4083 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4084 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4086 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4087 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4089 l2cap_retransmit_frames(sk
);
4092 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4093 (pi
->unacked_frames
> 0))
4094 __mod_retrans_timer();
4096 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4097 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4100 l2cap_ertm_send(sk
);
4105 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4107 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4108 u8 tx_seq
= __get_reqseq(rx_control
);
4110 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4112 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4114 pi
->expected_ack_seq
= tx_seq
;
4115 l2cap_drop_acked_frames(sk
);
4117 if (rx_control
& L2CAP_CTRL_FINAL
) {
4118 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4119 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4121 l2cap_retransmit_frames(sk
);
4123 l2cap_retransmit_frames(sk
);
4125 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4126 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4129 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4131 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4132 u8 tx_seq
= __get_reqseq(rx_control
);
4134 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4136 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4138 if (rx_control
& L2CAP_CTRL_POLL
) {
4139 pi
->expected_ack_seq
= tx_seq
;
4140 l2cap_drop_acked_frames(sk
);
4142 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4143 l2cap_retransmit_one_frame(sk
, tx_seq
);
4145 l2cap_ertm_send(sk
);
4147 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4148 pi
->srej_save_reqseq
= tx_seq
;
4149 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4151 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4152 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4153 pi
->srej_save_reqseq
== tx_seq
)
4154 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4156 l2cap_retransmit_one_frame(sk
, tx_seq
);
4158 l2cap_retransmit_one_frame(sk
, tx_seq
);
4159 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4160 pi
->srej_save_reqseq
= tx_seq
;
4161 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4166 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4168 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4169 u8 tx_seq
= __get_reqseq(rx_control
);
4171 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4173 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4174 pi
->expected_ack_seq
= tx_seq
;
4175 l2cap_drop_acked_frames(sk
);
4177 if (rx_control
& L2CAP_CTRL_POLL
)
4178 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4180 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4181 del_timer(&pi
->retrans_timer
);
4182 if (rx_control
& L2CAP_CTRL_POLL
)
4183 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4187 if (rx_control
& L2CAP_CTRL_POLL
)
4188 l2cap_send_srejtail(sk
);
4190 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4193 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4195 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4197 if (L2CAP_CTRL_FINAL
& rx_control
&&
4198 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4199 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4200 if (l2cap_pi(sk
)->unacked_frames
> 0)
4201 __mod_retrans_timer();
4202 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4205 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4206 case L2CAP_SUPER_RCV_READY
:
4207 l2cap_data_channel_rrframe(sk
, rx_control
);
4210 case L2CAP_SUPER_REJECT
:
4211 l2cap_data_channel_rejframe(sk
, rx_control
);
4214 case L2CAP_SUPER_SELECT_REJECT
:
4215 l2cap_data_channel_srejframe(sk
, rx_control
);
4218 case L2CAP_SUPER_RCV_NOT_READY
:
4219 l2cap_data_channel_rnrframe(sk
, rx_control
);
4227 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
4229 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4232 int len
, next_tx_seq_offset
, req_seq_offset
;
4234 control
= get_unaligned_le16(skb
->data
);
4239 * We can just drop the corrupted I-frame here.
4240 * Receiver will miss it and start proper recovery
4241 * procedures and ask retransmission.
4243 if (l2cap_check_fcs(pi
, skb
))
4246 if (__is_sar_start(control
) && __is_iframe(control
))
4249 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4252 if (len
> pi
->mps
) {
4253 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4257 req_seq
= __get_reqseq(control
);
4258 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4259 if (req_seq_offset
< 0)
4260 req_seq_offset
+= 64;
4262 next_tx_seq_offset
=
4263 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4264 if (next_tx_seq_offset
< 0)
4265 next_tx_seq_offset
+= 64;
4267 /* check for invalid req-seq */
4268 if (req_seq_offset
> next_tx_seq_offset
) {
4269 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4273 if (__is_iframe(control
)) {
4275 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4279 l2cap_data_channel_iframe(sk
, control
, skb
);
4283 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4287 l2cap_data_channel_sframe(sk
, control
, skb
);
4297 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4300 struct l2cap_pinfo
*pi
;
4305 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4307 BT_DBG("unknown cid 0x%4.4x", cid
);
4313 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4315 if (sk
->sk_state
!= BT_CONNECTED
)
4319 case L2CAP_MODE_BASIC
:
4320 /* If socket recv buffers overflows we drop data here
4321 * which is *bad* because L2CAP has to be reliable.
4322 * But we don't have any other choice. L2CAP doesn't
4323 * provide flow control mechanism. */
4325 if (pi
->imtu
< skb
->len
)
4328 if (!sock_queue_rcv_skb(sk
, skb
))
4332 case L2CAP_MODE_ERTM
:
4333 if (!sock_owned_by_user(sk
)) {
4334 l2cap_ertm_data_rcv(sk
, skb
);
4336 if (sk_add_backlog(sk
, skb
))
4342 case L2CAP_MODE_STREAMING
:
4343 control
= get_unaligned_le16(skb
->data
);
4347 if (l2cap_check_fcs(pi
, skb
))
4350 if (__is_sar_start(control
))
4353 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4356 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4359 tx_seq
= __get_txseq(control
);
4361 if (pi
->expected_tx_seq
== tx_seq
)
4362 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4364 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4366 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4371 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4385 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4389 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4393 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4395 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4398 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4401 if (!sock_queue_rcv_skb(sk
, skb
))
4413 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4415 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4419 skb_pull(skb
, L2CAP_HDR_SIZE
);
4420 cid
= __le16_to_cpu(lh
->cid
);
4421 len
= __le16_to_cpu(lh
->len
);
4423 if (len
!= skb
->len
) {
4428 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4431 case L2CAP_CID_SIGNALING
:
4432 l2cap_sig_channel(conn
, skb
);
4435 case L2CAP_CID_CONN_LESS
:
4436 psm
= get_unaligned_le16(skb
->data
);
4438 l2cap_conless_channel(conn
, psm
, skb
);
4442 l2cap_data_channel(conn
, cid
, skb
);
4447 /* ---- L2CAP interface with lower layer (HCI) ---- */
4449 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4451 int exact
= 0, lm1
= 0, lm2
= 0;
4452 register struct sock
*sk
;
4453 struct hlist_node
*node
;
4455 if (type
!= ACL_LINK
)
4458 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4460 /* Find listening sockets and check their link_mode */
4461 read_lock(&l2cap_sk_list
.lock
);
4462 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4463 if (sk
->sk_state
!= BT_LISTEN
)
4466 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4467 lm1
|= HCI_LM_ACCEPT
;
4468 if (l2cap_pi(sk
)->role_switch
)
4469 lm1
|= HCI_LM_MASTER
;
4471 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4472 lm2
|= HCI_LM_ACCEPT
;
4473 if (l2cap_pi(sk
)->role_switch
)
4474 lm2
|= HCI_LM_MASTER
;
4477 read_unlock(&l2cap_sk_list
.lock
);
4479 return exact
? lm1
: lm2
;
4482 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4484 struct l2cap_conn
*conn
;
4486 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4488 if (hcon
->type
!= ACL_LINK
)
4492 conn
= l2cap_conn_add(hcon
, status
);
4494 l2cap_conn_ready(conn
);
4496 l2cap_conn_del(hcon
, bt_err(status
));
4501 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4503 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4505 BT_DBG("hcon %p", hcon
);
4507 if (hcon
->type
!= ACL_LINK
|| !conn
)
4510 return conn
->disc_reason
;
4513 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4515 BT_DBG("hcon %p reason %d", hcon
, reason
);
4517 if (hcon
->type
!= ACL_LINK
)
4520 l2cap_conn_del(hcon
, bt_err(reason
));
4525 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4527 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4530 if (encrypt
== 0x00) {
4531 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4532 l2cap_sock_clear_timer(sk
);
4533 l2cap_sock_set_timer(sk
, HZ
* 5);
4534 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4535 __l2cap_sock_close(sk
, ECONNREFUSED
);
4537 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4538 l2cap_sock_clear_timer(sk
);
4542 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4544 struct l2cap_chan_list
*l
;
4545 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4551 l
= &conn
->chan_list
;
4553 BT_DBG("conn %p", conn
);
4555 read_lock(&l
->lock
);
4557 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4560 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4565 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4566 sk
->sk_state
== BT_CONFIG
)) {
4567 l2cap_check_encryption(sk
, encrypt
);
4572 if (sk
->sk_state
== BT_CONNECT
) {
4574 struct l2cap_conn_req req
;
4575 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4576 req
.psm
= l2cap_pi(sk
)->psm
;
4578 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4579 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4581 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4582 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4584 l2cap_sock_clear_timer(sk
);
4585 l2cap_sock_set_timer(sk
, HZ
/ 10);
4587 } else if (sk
->sk_state
== BT_CONNECT2
) {
4588 struct l2cap_conn_rsp rsp
;
4592 sk
->sk_state
= BT_CONFIG
;
4593 result
= L2CAP_CR_SUCCESS
;
4595 sk
->sk_state
= BT_DISCONN
;
4596 l2cap_sock_set_timer(sk
, HZ
/ 10);
4597 result
= L2CAP_CR_SEC_BLOCK
;
4600 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4601 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4602 rsp
.result
= cpu_to_le16(result
);
4603 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4604 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4605 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4611 read_unlock(&l
->lock
);
4616 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4618 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4620 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4623 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4625 if (flags
& ACL_START
) {
4626 struct l2cap_hdr
*hdr
;
4630 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4631 kfree_skb(conn
->rx_skb
);
4632 conn
->rx_skb
= NULL
;
4634 l2cap_conn_unreliable(conn
, ECOMM
);
4638 BT_ERR("Frame is too short (len %d)", skb
->len
);
4639 l2cap_conn_unreliable(conn
, ECOMM
);
4643 hdr
= (struct l2cap_hdr
*) skb
->data
;
4644 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4646 if (len
== skb
->len
) {
4647 /* Complete frame received */
4648 l2cap_recv_frame(conn
, skb
);
4652 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4654 if (skb
->len
> len
) {
4655 BT_ERR("Frame is too long (len %d, expected len %d)",
4657 l2cap_conn_unreliable(conn
, ECOMM
);
4661 /* Allocate skb for the complete frame (with header) */
4662 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4666 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4668 conn
->rx_len
= len
- skb
->len
;
4670 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4672 if (!conn
->rx_len
) {
4673 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4674 l2cap_conn_unreliable(conn
, ECOMM
);
4678 if (skb
->len
> conn
->rx_len
) {
4679 BT_ERR("Fragment is too long (len %d, expected %d)",
4680 skb
->len
, conn
->rx_len
);
4681 kfree_skb(conn
->rx_skb
);
4682 conn
->rx_skb
= NULL
;
4684 l2cap_conn_unreliable(conn
, ECOMM
);
4688 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4690 conn
->rx_len
-= skb
->len
;
4692 if (!conn
->rx_len
) {
4693 /* Complete frame received */
4694 l2cap_recv_frame(conn
, conn
->rx_skb
);
4695 conn
->rx_skb
= NULL
;
4704 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4707 struct hlist_node
*node
;
4709 read_lock_bh(&l2cap_sk_list
.lock
);
4711 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4712 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4714 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4715 batostr(&bt_sk(sk
)->src
),
4716 batostr(&bt_sk(sk
)->dst
),
4717 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4719 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4722 read_unlock_bh(&l2cap_sk_list
.lock
);
4727 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4729 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4732 static const struct file_operations l2cap_debugfs_fops
= {
4733 .open
= l2cap_debugfs_open
,
4735 .llseek
= seq_lseek
,
4736 .release
= single_release
,
/* Dentry of the debugfs file created in l2cap_init(), removed in exit. */
static struct dentry *l2cap_debugfs;
4741 static const struct proto_ops l2cap_sock_ops
= {
4742 .family
= PF_BLUETOOTH
,
4743 .owner
= THIS_MODULE
,
4744 .release
= l2cap_sock_release
,
4745 .bind
= l2cap_sock_bind
,
4746 .connect
= l2cap_sock_connect
,
4747 .listen
= l2cap_sock_listen
,
4748 .accept
= l2cap_sock_accept
,
4749 .getname
= l2cap_sock_getname
,
4750 .sendmsg
= l2cap_sock_sendmsg
,
4751 .recvmsg
= l2cap_sock_recvmsg
,
4752 .poll
= bt_sock_poll
,
4753 .ioctl
= bt_sock_ioctl
,
4754 .mmap
= sock_no_mmap
,
4755 .socketpair
= sock_no_socketpair
,
4756 .shutdown
= l2cap_sock_shutdown
,
4757 .setsockopt
= l2cap_sock_setsockopt
,
4758 .getsockopt
= l2cap_sock_getsockopt
4761 static const struct net_proto_family l2cap_sock_family_ops
= {
4762 .family
= PF_BLUETOOTH
,
4763 .owner
= THIS_MODULE
,
4764 .create
= l2cap_sock_create
,
4767 static struct hci_proto l2cap_hci_proto
= {
4769 .id
= HCI_PROTO_L2CAP
,
4770 .connect_ind
= l2cap_connect_ind
,
4771 .connect_cfm
= l2cap_connect_cfm
,
4772 .disconn_ind
= l2cap_disconn_ind
,
4773 .disconn_cfm
= l2cap_disconn_cfm
,
4774 .security_cfm
= l2cap_security_cfm
,
4775 .recv_acldata
= l2cap_recv_acldata
4778 static int __init
l2cap_init(void)
4782 err
= proto_register(&l2cap_proto
, 0);
4786 _busy_wq
= create_singlethread_workqueue("l2cap");
4790 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4792 BT_ERR("L2CAP socket registration failed");
4796 err
= hci_register_proto(&l2cap_hci_proto
);
4798 BT_ERR("L2CAP protocol registration failed");
4799 bt_sock_unregister(BTPROTO_L2CAP
);
4804 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4805 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4807 BT_ERR("Failed to create L2CAP debug file");
4810 BT_INFO("L2CAP ver %s", VERSION
);
4811 BT_INFO("L2CAP socket layer initialized");
4816 proto_unregister(&l2cap_proto
);
4820 static void __exit
l2cap_exit(void)
4822 debugfs_remove(l2cap_debugfs
);
4824 flush_workqueue(_busy_wq
);
4825 destroy_workqueue(_busy_wq
);
4827 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4828 BT_ERR("L2CAP socket unregistration failed");
4830 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4831 BT_ERR("L2CAP protocol unregistration failed");
4833 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4844 module_init(l2cap_init
);
4845 module_exit(l2cap_exit
);
4847 module_param(enable_ertm
, bool, 0644);
4848 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4850 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4851 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4852 MODULE_VERSION(VERSION
);
4853 MODULE_LICENSE("GPL");
4854 MODULE_ALIAS("bt-proto-0");