2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm
= 0;
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops
;
65 static struct workqueue_struct
*_busy_wq
;
67 static struct bt_sock_list l2cap_sk_list
= {
68 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
71 static void l2cap_busy_work(struct work_struct
*work
);
73 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
74 static void l2cap_sock_close(struct sock
*sk
);
75 static void l2cap_sock_kill(struct sock
*sk
);
77 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
78 u8 code
, u8 ident
, u16 dlen
, void *data
);
80 /* ---- L2CAP timers ---- */
81 static void l2cap_sock_timeout(unsigned long arg
)
83 struct sock
*sk
= (struct sock
*) arg
;
86 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
90 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
91 reason
= ECONNREFUSED
;
92 else if (sk
->sk_state
== BT_CONNECT
&&
93 l2cap_pi(sk
)->sec_level
!= BT_SECURITY_SDP
)
94 reason
= ECONNREFUSED
;
98 __l2cap_sock_close(sk
, reason
);
106 static void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
108 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
109 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
112 static void l2cap_sock_clear_timer(struct sock
*sk
)
114 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
115 sk_stop_timer(sk
, &sk
->sk_timer
);
118 /* ---- L2CAP channels ---- */
119 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
122 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
123 if (l2cap_pi(s
)->dcid
== cid
)
129 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
132 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
133 if (l2cap_pi(s
)->scid
== cid
)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
145 s
= __l2cap_get_chan_by_scid(l
, cid
);
148 read_unlock(&l
->lock
);
152 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
155 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
156 if (l2cap_pi(s
)->ident
== ident
)
162 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
166 s
= __l2cap_get_chan_by_ident(l
, ident
);
169 read_unlock(&l
->lock
);
173 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
175 u16 cid
= L2CAP_CID_DYN_START
;
177 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
178 if (!__l2cap_get_chan_by_scid(l
, cid
))
185 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
190 l2cap_pi(l
->head
)->prev_c
= sk
;
192 l2cap_pi(sk
)->next_c
= l
->head
;
193 l2cap_pi(sk
)->prev_c
= NULL
;
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
199 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
201 write_lock_bh(&l
->lock
);
206 l2cap_pi(next
)->prev_c
= prev
;
208 l2cap_pi(prev
)->next_c
= next
;
209 write_unlock_bh(&l
->lock
);
214 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
216 struct l2cap_chan_list
*l
= &conn
->chan_list
;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
219 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
221 conn
->disc_reason
= 0x13;
223 l2cap_pi(sk
)->conn
= conn
;
225 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
228 } else if (sk
->sk_type
== SOCK_DGRAM
) {
229 /* Connectionless socket */
230 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
231 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
232 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
236 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
237 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
240 __l2cap_chan_link(l
, sk
);
243 bt_accept_enqueue(parent
, sk
);
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock
*sk
, int err
)
250 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
251 struct sock
*parent
= bt_sk(sk
)->parent
;
253 l2cap_sock_clear_timer(sk
);
255 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn
->chan_list
, sk
);
260 l2cap_pi(sk
)->conn
= NULL
;
261 hci_conn_put(conn
->hcon
);
264 sk
->sk_state
= BT_CLOSED
;
265 sock_set_flag(sk
, SOCK_ZAPPED
);
271 bt_accept_unlink(sk
);
272 parent
->sk_data_ready(parent
, 0);
274 sk
->sk_state_change(sk
);
276 skb_queue_purge(TX_QUEUE(sk
));
278 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
279 struct srej_list
*l
, *tmp
;
281 del_timer(&l2cap_pi(sk
)->retrans_timer
);
282 del_timer(&l2cap_pi(sk
)->monitor_timer
);
283 del_timer(&l2cap_pi(sk
)->ack_timer
);
285 skb_queue_purge(SREJ_QUEUE(sk
));
286 skb_queue_purge(BUSY_QUEUE(sk
));
288 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
295 /* Service level security */
296 static inline int l2cap_check_security(struct sock
*sk
)
298 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
301 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
303 auth_type
= HCI_AT_NO_BONDING_MITM
;
305 auth_type
= HCI_AT_NO_BONDING
;
307 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
308 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
310 switch (l2cap_pi(sk
)->sec_level
) {
311 case BT_SECURITY_HIGH
:
312 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
314 case BT_SECURITY_MEDIUM
:
315 auth_type
= HCI_AT_GENERAL_BONDING
;
318 auth_type
= HCI_AT_NO_BONDING
;
323 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
327 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn
->lock
);
339 if (++conn
->tx_ident
> 128)
344 spin_unlock_bh(&conn
->lock
);
349 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
351 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
353 BT_DBG("code 0x%2.2x", code
);
358 hci_send_acl(conn
->hcon
, skb
, 0);
361 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
364 struct l2cap_hdr
*lh
;
365 struct l2cap_conn
*conn
= pi
->conn
;
366 struct sock
*sk
= (struct sock
*)pi
;
367 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
369 if (sk
->sk_state
!= BT_CONNECTED
)
372 if (pi
->fcs
== L2CAP_FCS_CRC16
)
375 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
377 count
= min_t(unsigned int, conn
->mtu
, hlen
);
378 control
|= L2CAP_CTRL_FRAME_TYPE
;
380 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
381 control
|= L2CAP_CTRL_FINAL
;
382 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
385 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
386 control
|= L2CAP_CTRL_POLL
;
387 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
390 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
394 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
395 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
396 lh
->cid
= cpu_to_le16(pi
->dcid
);
397 put_unaligned_le16(control
, skb_put(skb
, 2));
399 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
400 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
401 put_unaligned_le16(fcs
, skb_put(skb
, 2));
404 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
409 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
410 control
|= L2CAP_SUPER_RCV_NOT_READY
;
411 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
413 control
|= L2CAP_SUPER_RCV_READY
;
415 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
417 l2cap_send_sframe(pi
, control
);
420 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
422 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
425 static void l2cap_do_start(struct sock
*sk
)
427 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
429 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
430 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
433 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
434 struct l2cap_conn_req req
;
435 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
436 req
.psm
= l2cap_pi(sk
)->psm
;
438 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
439 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
441 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
442 L2CAP_CONN_REQ
, sizeof(req
), &req
);
445 struct l2cap_info_req req
;
446 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
448 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
449 conn
->info_ident
= l2cap_get_ident(conn
);
451 mod_timer(&conn
->info_timer
, jiffies
+
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
454 l2cap_send_cmd(conn
, conn
->info_ident
,
455 L2CAP_INFO_REQ
, sizeof(req
), &req
);
459 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
)
461 struct l2cap_disconn_req req
;
466 skb_queue_purge(TX_QUEUE(sk
));
468 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
469 del_timer(&l2cap_pi(sk
)->retrans_timer
);
470 del_timer(&l2cap_pi(sk
)->monitor_timer
);
471 del_timer(&l2cap_pi(sk
)->ack_timer
);
474 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
475 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
476 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
477 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
479 sk
->sk_state
= BT_DISCONN
;
482 /* ---- L2CAP connections ---- */
483 static void l2cap_conn_start(struct l2cap_conn
*conn
)
485 struct l2cap_chan_list
*l
= &conn
->chan_list
;
488 BT_DBG("conn %p", conn
);
492 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
495 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
496 sk
->sk_type
!= SOCK_STREAM
) {
501 if (sk
->sk_state
== BT_CONNECT
) {
502 if (l2cap_check_security(sk
) &&
503 __l2cap_no_conn_pending(sk
)) {
504 struct l2cap_conn_req req
;
505 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
506 req
.psm
= l2cap_pi(sk
)->psm
;
508 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
509 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
511 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
512 L2CAP_CONN_REQ
, sizeof(req
), &req
);
514 } else if (sk
->sk_state
== BT_CONNECT2
) {
515 struct l2cap_conn_rsp rsp
;
516 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
517 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
519 if (l2cap_check_security(sk
)) {
520 if (bt_sk(sk
)->defer_setup
) {
521 struct sock
*parent
= bt_sk(sk
)->parent
;
522 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
523 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
524 parent
->sk_data_ready(parent
, 0);
527 sk
->sk_state
= BT_CONFIG
;
528 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
529 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
532 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
533 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
536 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
537 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
543 read_unlock(&l
->lock
);
546 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
548 struct l2cap_chan_list
*l
= &conn
->chan_list
;
551 BT_DBG("conn %p", conn
);
555 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
558 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
559 sk
->sk_type
!= SOCK_STREAM
) {
560 l2cap_sock_clear_timer(sk
);
561 sk
->sk_state
= BT_CONNECTED
;
562 sk
->sk_state_change(sk
);
563 } else if (sk
->sk_state
== BT_CONNECT
)
569 read_unlock(&l
->lock
);
572 /* Notify sockets that we cannot guaranty reliability anymore */
573 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
575 struct l2cap_chan_list
*l
= &conn
->chan_list
;
578 BT_DBG("conn %p", conn
);
582 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
583 if (l2cap_pi(sk
)->force_reliable
)
587 read_unlock(&l
->lock
);
590 static void l2cap_info_timeout(unsigned long arg
)
592 struct l2cap_conn
*conn
= (void *) arg
;
594 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
595 conn
->info_ident
= 0;
597 l2cap_conn_start(conn
);
600 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
602 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
607 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
611 hcon
->l2cap_data
= conn
;
614 BT_DBG("hcon %p conn %p", hcon
, conn
);
616 conn
->mtu
= hcon
->hdev
->acl_mtu
;
617 conn
->src
= &hcon
->hdev
->bdaddr
;
618 conn
->dst
= &hcon
->dst
;
622 spin_lock_init(&conn
->lock
);
623 rwlock_init(&conn
->chan_list
.lock
);
625 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
626 (unsigned long) conn
);
628 conn
->disc_reason
= 0x13;
633 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
635 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
641 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
643 kfree_skb(conn
->rx_skb
);
646 while ((sk
= conn
->chan_list
.head
)) {
648 l2cap_chan_del(sk
, err
);
653 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
654 del_timer_sync(&conn
->info_timer
);
656 hcon
->l2cap_data
= NULL
;
660 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
662 struct l2cap_chan_list
*l
= &conn
->chan_list
;
663 write_lock_bh(&l
->lock
);
664 __l2cap_chan_add(conn
, sk
, parent
);
665 write_unlock_bh(&l
->lock
);
668 /* ---- Socket interface ---- */
669 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
672 struct hlist_node
*node
;
673 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
674 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
681 /* Find socket with psm and source bdaddr.
682 * Returns closest match.
684 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
686 struct sock
*sk
= NULL
, *sk1
= NULL
;
687 struct hlist_node
*node
;
689 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
690 if (state
&& sk
->sk_state
!= state
)
693 if (l2cap_pi(sk
)->psm
== psm
) {
695 if (!bacmp(&bt_sk(sk
)->src
, src
))
699 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
703 return node
? sk
: sk1
;
706 /* Find socket with given address (psm, src).
707 * Returns locked socket */
708 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
711 read_lock(&l2cap_sk_list
.lock
);
712 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
715 read_unlock(&l2cap_sk_list
.lock
);
719 static void l2cap_sock_destruct(struct sock
*sk
)
723 skb_queue_purge(&sk
->sk_receive_queue
);
724 skb_queue_purge(&sk
->sk_write_queue
);
727 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
731 BT_DBG("parent %p", parent
);
733 /* Close not yet accepted channels */
734 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
735 l2cap_sock_close(sk
);
737 parent
->sk_state
= BT_CLOSED
;
738 sock_set_flag(parent
, SOCK_ZAPPED
);
741 /* Kill socket (only if zapped and orphan)
742 * Must be called on unlocked socket.
744 static void l2cap_sock_kill(struct sock
*sk
)
746 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
749 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
751 /* Kill poor orphan */
752 bt_sock_unlink(&l2cap_sk_list
, sk
);
753 sock_set_flag(sk
, SOCK_DEAD
);
757 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
759 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
761 switch (sk
->sk_state
) {
763 l2cap_sock_cleanup_listen(sk
);
768 if (sk
->sk_type
== SOCK_SEQPACKET
||
769 sk
->sk_type
== SOCK_STREAM
) {
770 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
772 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
773 l2cap_send_disconn_req(conn
, sk
);
775 l2cap_chan_del(sk
, reason
);
779 if (sk
->sk_type
== SOCK_SEQPACKET
||
780 sk
->sk_type
== SOCK_STREAM
) {
781 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
782 struct l2cap_conn_rsp rsp
;
785 if (bt_sk(sk
)->defer_setup
)
786 result
= L2CAP_CR_SEC_BLOCK
;
788 result
= L2CAP_CR_BAD_PSM
;
790 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
791 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
792 rsp
.result
= cpu_to_le16(result
);
793 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
794 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
795 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
797 l2cap_chan_del(sk
, reason
);
802 l2cap_chan_del(sk
, reason
);
806 sock_set_flag(sk
, SOCK_ZAPPED
);
811 /* Must be called on unlocked socket. */
812 static void l2cap_sock_close(struct sock
*sk
)
814 l2cap_sock_clear_timer(sk
);
816 __l2cap_sock_close(sk
, ECONNRESET
);
821 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
823 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
828 sk
->sk_type
= parent
->sk_type
;
829 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
831 pi
->imtu
= l2cap_pi(parent
)->imtu
;
832 pi
->omtu
= l2cap_pi(parent
)->omtu
;
833 pi
->mode
= l2cap_pi(parent
)->mode
;
834 pi
->fcs
= l2cap_pi(parent
)->fcs
;
835 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
836 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
837 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
838 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
839 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
841 pi
->imtu
= L2CAP_DEFAULT_MTU
;
843 if (enable_ertm
&& sk
->sk_type
== SOCK_STREAM
)
844 pi
->mode
= L2CAP_MODE_ERTM
;
846 pi
->mode
= L2CAP_MODE_BASIC
;
847 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
848 pi
->fcs
= L2CAP_FCS_CRC16
;
849 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
850 pi
->sec_level
= BT_SECURITY_LOW
;
852 pi
->force_reliable
= 0;
855 /* Default config options */
857 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
858 skb_queue_head_init(TX_QUEUE(sk
));
859 skb_queue_head_init(SREJ_QUEUE(sk
));
860 skb_queue_head_init(BUSY_QUEUE(sk
));
861 INIT_LIST_HEAD(SREJ_LIST(sk
));
864 static struct proto l2cap_proto
= {
866 .owner
= THIS_MODULE
,
867 .obj_size
= sizeof(struct l2cap_pinfo
)
870 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
874 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
878 sock_init_data(sock
, sk
);
879 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
881 sk
->sk_destruct
= l2cap_sock_destruct
;
882 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
884 sock_reset_flag(sk
, SOCK_ZAPPED
);
886 sk
->sk_protocol
= proto
;
887 sk
->sk_state
= BT_OPEN
;
889 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
891 bt_sock_link(&l2cap_sk_list
, sk
);
895 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
900 BT_DBG("sock %p", sock
);
902 sock
->state
= SS_UNCONNECTED
;
904 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
905 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
906 return -ESOCKTNOSUPPORT
;
908 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
911 sock
->ops
= &l2cap_sock_ops
;
913 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
917 l2cap_sock_init(sk
, NULL
);
921 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
923 struct sock
*sk
= sock
->sk
;
924 struct sockaddr_l2 la
;
929 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
932 memset(&la
, 0, sizeof(la
));
933 len
= min_t(unsigned int, sizeof(la
), alen
);
934 memcpy(&la
, addr
, len
);
941 if (sk
->sk_state
!= BT_OPEN
) {
946 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
947 !capable(CAP_NET_BIND_SERVICE
)) {
952 write_lock_bh(&l2cap_sk_list
.lock
);
954 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
957 /* Save source address */
958 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
959 l2cap_pi(sk
)->psm
= la
.l2_psm
;
960 l2cap_pi(sk
)->sport
= la
.l2_psm
;
961 sk
->sk_state
= BT_BOUND
;
963 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
964 __le16_to_cpu(la
.l2_psm
) == 0x0003)
965 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
968 write_unlock_bh(&l2cap_sk_list
.lock
);
975 static int l2cap_do_connect(struct sock
*sk
)
977 bdaddr_t
*src
= &bt_sk(sk
)->src
;
978 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
979 struct l2cap_conn
*conn
;
980 struct hci_conn
*hcon
;
981 struct hci_dev
*hdev
;
985 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
988 hdev
= hci_get_route(dst
, src
);
990 return -EHOSTUNREACH
;
992 hci_dev_lock_bh(hdev
);
996 if (sk
->sk_type
== SOCK_RAW
) {
997 switch (l2cap_pi(sk
)->sec_level
) {
998 case BT_SECURITY_HIGH
:
999 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
1001 case BT_SECURITY_MEDIUM
:
1002 auth_type
= HCI_AT_DEDICATED_BONDING
;
1005 auth_type
= HCI_AT_NO_BONDING
;
1008 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
1009 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
1010 auth_type
= HCI_AT_NO_BONDING_MITM
;
1012 auth_type
= HCI_AT_NO_BONDING
;
1014 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
1015 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1017 switch (l2cap_pi(sk
)->sec_level
) {
1018 case BT_SECURITY_HIGH
:
1019 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
1021 case BT_SECURITY_MEDIUM
:
1022 auth_type
= HCI_AT_GENERAL_BONDING
;
1025 auth_type
= HCI_AT_NO_BONDING
;
1030 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1031 l2cap_pi(sk
)->sec_level
, auth_type
);
1035 conn
= l2cap_conn_add(hcon
, 0);
1043 /* Update source addr of the socket */
1044 bacpy(src
, conn
->src
);
1046 l2cap_chan_add(conn
, sk
, NULL
);
1048 sk
->sk_state
= BT_CONNECT
;
1049 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1051 if (hcon
->state
== BT_CONNECTED
) {
1052 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1053 sk
->sk_type
!= SOCK_STREAM
) {
1054 l2cap_sock_clear_timer(sk
);
1055 sk
->sk_state
= BT_CONNECTED
;
1061 hci_dev_unlock_bh(hdev
);
1066 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1068 struct sock
*sk
= sock
->sk
;
1069 struct sockaddr_l2 la
;
1072 BT_DBG("sk %p", sk
);
1074 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1075 addr
->sa_family
!= AF_BLUETOOTH
)
1078 memset(&la
, 0, sizeof(la
));
1079 len
= min_t(unsigned int, sizeof(la
), alen
);
1080 memcpy(&la
, addr
, len
);
1087 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1093 switch (l2cap_pi(sk
)->mode
) {
1094 case L2CAP_MODE_BASIC
:
1096 case L2CAP_MODE_ERTM
:
1097 case L2CAP_MODE_STREAMING
:
1106 switch (sk
->sk_state
) {
1110 /* Already connecting */
1114 /* Already connected */
1127 /* Set destination address and psm */
1128 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1129 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1131 err
= l2cap_do_connect(sk
);
1136 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1137 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1143 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1145 struct sock
*sk
= sock
->sk
;
1148 BT_DBG("sk %p backlog %d", sk
, backlog
);
1152 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1153 || sk
->sk_state
!= BT_BOUND
) {
1158 switch (l2cap_pi(sk
)->mode
) {
1159 case L2CAP_MODE_BASIC
:
1161 case L2CAP_MODE_ERTM
:
1162 case L2CAP_MODE_STREAMING
:
1171 if (!l2cap_pi(sk
)->psm
) {
1172 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1177 write_lock_bh(&l2cap_sk_list
.lock
);
1179 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1180 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1181 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1182 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1187 write_unlock_bh(&l2cap_sk_list
.lock
);
1193 sk
->sk_max_ack_backlog
= backlog
;
1194 sk
->sk_ack_backlog
= 0;
1195 sk
->sk_state
= BT_LISTEN
;
1202 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1204 DECLARE_WAITQUEUE(wait
, current
);
1205 struct sock
*sk
= sock
->sk
, *nsk
;
1209 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1211 if (sk
->sk_state
!= BT_LISTEN
) {
1216 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1218 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1220 /* Wait for an incoming connection. (wake-one). */
1221 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1222 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1223 set_current_state(TASK_INTERRUPTIBLE
);
1230 timeo
= schedule_timeout(timeo
);
1231 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1233 if (sk
->sk_state
!= BT_LISTEN
) {
1238 if (signal_pending(current
)) {
1239 err
= sock_intr_errno(timeo
);
1243 set_current_state(TASK_RUNNING
);
1244 remove_wait_queue(sk_sleep(sk
), &wait
);
1249 newsock
->state
= SS_CONNECTED
;
1251 BT_DBG("new socket %p", nsk
);
1258 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1260 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1261 struct sock
*sk
= sock
->sk
;
1263 BT_DBG("sock %p, sk %p", sock
, sk
);
1265 addr
->sa_family
= AF_BLUETOOTH
;
1266 *len
= sizeof(struct sockaddr_l2
);
1269 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1270 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1271 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1273 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1274 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1275 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1281 static int __l2cap_wait_ack(struct sock
*sk
)
1283 DECLARE_WAITQUEUE(wait
, current
);
1287 add_wait_queue(sk_sleep(sk
), &wait
);
1288 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1289 set_current_state(TASK_INTERRUPTIBLE
);
1294 if (signal_pending(current
)) {
1295 err
= sock_intr_errno(timeo
);
1300 timeo
= schedule_timeout(timeo
);
1303 err
= sock_error(sk
);
1307 set_current_state(TASK_RUNNING
);
1308 remove_wait_queue(sk_sleep(sk
), &wait
);
1312 static void l2cap_monitor_timeout(unsigned long arg
)
1314 struct sock
*sk
= (void *) arg
;
1317 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1318 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
);
1323 l2cap_pi(sk
)->retry_count
++;
1324 __mod_monitor_timer();
1326 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1330 static void l2cap_retrans_timeout(unsigned long arg
)
1332 struct sock
*sk
= (void *) arg
;
1335 l2cap_pi(sk
)->retry_count
= 1;
1336 __mod_monitor_timer();
1338 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1340 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1344 static void l2cap_drop_acked_frames(struct sock
*sk
)
1346 struct sk_buff
*skb
;
1348 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1349 l2cap_pi(sk
)->unacked_frames
) {
1350 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1353 skb
= skb_dequeue(TX_QUEUE(sk
));
1356 l2cap_pi(sk
)->unacked_frames
--;
1359 if (!l2cap_pi(sk
)->unacked_frames
)
1360 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1363 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1365 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1367 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1369 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
1372 static int l2cap_streaming_send(struct sock
*sk
)
1374 struct sk_buff
*skb
, *tx_skb
;
1375 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1378 while ((skb
= sk
->sk_send_head
)) {
1379 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1381 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1382 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1383 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1385 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1386 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1387 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1390 l2cap_do_send(sk
, tx_skb
);
1392 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1394 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1395 sk
->sk_send_head
= NULL
;
1397 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1399 skb
= skb_dequeue(TX_QUEUE(sk
));
1405 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1407 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1408 struct sk_buff
*skb
, *tx_skb
;
1411 skb
= skb_peek(TX_QUEUE(sk
));
1416 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1419 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1422 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1424 if (pi
->remote_max_tx
&&
1425 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1426 l2cap_send_disconn_req(pi
->conn
, sk
);
1430 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1431 bt_cb(skb
)->retries
++;
1432 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1434 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1435 control
|= L2CAP_CTRL_FINAL
;
1436 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1439 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1440 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1442 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1444 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1445 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1446 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1449 l2cap_do_send(sk
, tx_skb
);
1452 static int l2cap_ertm_send(struct sock
*sk
)
1454 struct sk_buff
*skb
, *tx_skb
;
1455 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1459 if (sk
->sk_state
!= BT_CONNECTED
)
1462 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1464 if (pi
->remote_max_tx
&&
1465 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1466 l2cap_send_disconn_req(pi
->conn
, sk
);
1470 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1472 bt_cb(skb
)->retries
++;
1474 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1475 control
&= L2CAP_CTRL_SAR
;
1477 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1478 control
|= L2CAP_CTRL_FINAL
;
1479 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1481 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1482 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1483 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1486 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1487 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1488 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1491 l2cap_do_send(sk
, tx_skb
);
1493 __mod_retrans_timer();
1495 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1496 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1498 pi
->unacked_frames
++;
1501 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1502 sk
->sk_send_head
= NULL
;
1504 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1512 static int l2cap_retransmit_frames(struct sock
*sk
)
1514 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1517 spin_lock_bh(&pi
->send_lock
);
1519 if (!skb_queue_empty(TX_QUEUE(sk
)))
1520 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1522 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1523 ret
= l2cap_ertm_send(sk
);
1525 spin_unlock_bh(&pi
->send_lock
);
1530 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1532 struct sock
*sk
= (struct sock
*)pi
;
1536 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1538 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1539 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1540 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1541 l2cap_send_sframe(pi
, control
);
1545 spin_lock_bh(&pi
->send_lock
);
1546 nframes
= l2cap_ertm_send(sk
);
1547 spin_unlock_bh(&pi
->send_lock
);
1552 control
|= L2CAP_SUPER_RCV_READY
;
1553 l2cap_send_sframe(pi
, control
);
1556 static void l2cap_send_srejtail(struct sock
*sk
)
1558 struct srej_list
*tail
;
1561 control
= L2CAP_SUPER_SELECT_REJECT
;
1562 control
|= L2CAP_CTRL_FINAL
;
1564 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1565 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1567 l2cap_send_sframe(l2cap_pi(sk
), control
);
1570 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1572 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1573 struct sk_buff
**frag
;
1576 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1582 /* Continuation fragments (no L2CAP header) */
1583 frag
= &skb_shinfo(skb
)->frag_list
;
1585 count
= min_t(unsigned int, conn
->mtu
, len
);
1587 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1590 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1596 frag
= &(*frag
)->next
;
1602 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1604 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1605 struct sk_buff
*skb
;
1606 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1607 struct l2cap_hdr
*lh
;
1609 BT_DBG("sk %p len %d", sk
, (int)len
);
1611 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1612 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1613 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1615 return ERR_PTR(-ENOMEM
);
1617 /* Create L2CAP header */
1618 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1619 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1620 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1621 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1623 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1624 if (unlikely(err
< 0)) {
1626 return ERR_PTR(err
);
1631 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1633 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1634 struct sk_buff
*skb
;
1635 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1636 struct l2cap_hdr
*lh
;
1638 BT_DBG("sk %p len %d", sk
, (int)len
);
1640 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1641 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1642 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1644 return ERR_PTR(-ENOMEM
);
1646 /* Create L2CAP header */
1647 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1648 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1649 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1651 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1652 if (unlikely(err
< 0)) {
1654 return ERR_PTR(err
);
1659 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1661 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1662 struct sk_buff
*skb
;
1663 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1664 struct l2cap_hdr
*lh
;
1666 BT_DBG("sk %p len %d", sk
, (int)len
);
1669 return ERR_PTR(-ENOTCONN
);
1674 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1677 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1678 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1679 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1681 return ERR_PTR(-ENOMEM
);
1683 /* Create L2CAP header */
1684 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1685 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1686 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1687 put_unaligned_le16(control
, skb_put(skb
, 2));
1689 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1691 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1692 if (unlikely(err
< 0)) {
1694 return ERR_PTR(err
);
1697 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1698 put_unaligned_le16(0, skb_put(skb
, 2));
1700 bt_cb(skb
)->retries
= 0;
1704 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1706 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1707 struct sk_buff
*skb
;
1708 struct sk_buff_head sar_queue
;
1712 skb_queue_head_init(&sar_queue
);
1713 control
= L2CAP_SDU_START
;
1714 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1716 return PTR_ERR(skb
);
1718 __skb_queue_tail(&sar_queue
, skb
);
1719 len
-= pi
->remote_mps
;
1720 size
+= pi
->remote_mps
;
1725 if (len
> pi
->remote_mps
) {
1726 control
= L2CAP_SDU_CONTINUE
;
1727 buflen
= pi
->remote_mps
;
1729 control
= L2CAP_SDU_END
;
1733 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1735 skb_queue_purge(&sar_queue
);
1736 return PTR_ERR(skb
);
1739 __skb_queue_tail(&sar_queue
, skb
);
1743 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1744 spin_lock_bh(&pi
->send_lock
);
1745 if (sk
->sk_send_head
== NULL
)
1746 sk
->sk_send_head
= sar_queue
.next
;
1747 spin_unlock_bh(&pi
->send_lock
);
1752 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1754 struct sock
*sk
= sock
->sk
;
1755 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1756 struct sk_buff
*skb
;
1760 BT_DBG("sock %p, sk %p", sock
, sk
);
1762 err
= sock_error(sk
);
1766 if (msg
->msg_flags
& MSG_OOB
)
1771 if (sk
->sk_state
!= BT_CONNECTED
) {
1776 /* Connectionless channel */
1777 if (sk
->sk_type
== SOCK_DGRAM
) {
1778 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1782 l2cap_do_send(sk
, skb
);
1789 case L2CAP_MODE_BASIC
:
1790 /* Check outgoing MTU */
1791 if (len
> pi
->omtu
) {
1796 /* Create a basic PDU */
1797 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1803 l2cap_do_send(sk
, skb
);
1807 case L2CAP_MODE_ERTM
:
1808 case L2CAP_MODE_STREAMING
:
1809 /* Entire SDU fits into one PDU */
1810 if (len
<= pi
->remote_mps
) {
1811 control
= L2CAP_SDU_UNSEGMENTED
;
1812 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1817 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1819 if (pi
->mode
== L2CAP_MODE_ERTM
)
1820 spin_lock_bh(&pi
->send_lock
);
1822 if (sk
->sk_send_head
== NULL
)
1823 sk
->sk_send_head
= skb
;
1825 if (pi
->mode
== L2CAP_MODE_ERTM
)
1826 spin_unlock_bh(&pi
->send_lock
);
1828 /* Segment SDU into multiples PDUs */
1829 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1834 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1835 err
= l2cap_streaming_send(sk
);
1837 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&&
1838 pi
->conn_state
&& L2CAP_CONN_WAIT_F
) {
1842 spin_lock_bh(&pi
->send_lock
);
1843 err
= l2cap_ertm_send(sk
);
1844 spin_unlock_bh(&pi
->send_lock
);
1852 BT_DBG("bad state %1.1x", pi
->mode
);
1861 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1863 struct sock
*sk
= sock
->sk
;
1867 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1868 struct l2cap_conn_rsp rsp
;
1870 sk
->sk_state
= BT_CONFIG
;
1872 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1873 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1874 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1875 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1876 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1877 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1885 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1888 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1890 struct sock
*sk
= sock
->sk
;
1891 struct l2cap_options opts
;
1895 BT_DBG("sk %p", sk
);
1901 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1902 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1903 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1904 opts
.mode
= l2cap_pi(sk
)->mode
;
1905 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1906 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1907 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1909 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1910 if (copy_from_user((char *) &opts
, optval
, len
)) {
1915 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1920 l2cap_pi(sk
)->mode
= opts
.mode
;
1921 switch (l2cap_pi(sk
)->mode
) {
1922 case L2CAP_MODE_BASIC
:
1924 case L2CAP_MODE_ERTM
:
1925 case L2CAP_MODE_STREAMING
:
1934 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1935 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1936 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1937 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1938 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1942 if (get_user(opt
, (u32 __user
*) optval
)) {
1947 if (opt
& L2CAP_LM_AUTH
)
1948 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1949 if (opt
& L2CAP_LM_ENCRYPT
)
1950 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1951 if (opt
& L2CAP_LM_SECURE
)
1952 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1954 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1955 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1967 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1969 struct sock
*sk
= sock
->sk
;
1970 struct bt_security sec
;
1974 BT_DBG("sk %p", sk
);
1976 if (level
== SOL_L2CAP
)
1977 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
1979 if (level
!= SOL_BLUETOOTH
)
1980 return -ENOPROTOOPT
;
1986 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
1987 && sk
->sk_type
!= SOCK_RAW
) {
1992 sec
.level
= BT_SECURITY_LOW
;
1994 len
= min_t(unsigned int, sizeof(sec
), optlen
);
1995 if (copy_from_user((char *) &sec
, optval
, len
)) {
2000 if (sec
.level
< BT_SECURITY_LOW
||
2001 sec
.level
> BT_SECURITY_HIGH
) {
2006 l2cap_pi(sk
)->sec_level
= sec
.level
;
2009 case BT_DEFER_SETUP
:
2010 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2015 if (get_user(opt
, (u32 __user
*) optval
)) {
2020 bt_sk(sk
)->defer_setup
= opt
;
2032 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2034 struct sock
*sk
= sock
->sk
;
2035 struct l2cap_options opts
;
2036 struct l2cap_conninfo cinfo
;
2040 BT_DBG("sk %p", sk
);
2042 if (get_user(len
, optlen
))
2049 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2050 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2051 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2052 opts
.mode
= l2cap_pi(sk
)->mode
;
2053 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2054 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2055 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2057 len
= min_t(unsigned int, len
, sizeof(opts
));
2058 if (copy_to_user(optval
, (char *) &opts
, len
))
2064 switch (l2cap_pi(sk
)->sec_level
) {
2065 case BT_SECURITY_LOW
:
2066 opt
= L2CAP_LM_AUTH
;
2068 case BT_SECURITY_MEDIUM
:
2069 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2071 case BT_SECURITY_HIGH
:
2072 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2080 if (l2cap_pi(sk
)->role_switch
)
2081 opt
|= L2CAP_LM_MASTER
;
2083 if (l2cap_pi(sk
)->force_reliable
)
2084 opt
|= L2CAP_LM_RELIABLE
;
2086 if (put_user(opt
, (u32 __user
*) optval
))
2090 case L2CAP_CONNINFO
:
2091 if (sk
->sk_state
!= BT_CONNECTED
&&
2092 !(sk
->sk_state
== BT_CONNECT2
&&
2093 bt_sk(sk
)->defer_setup
)) {
2098 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2099 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2101 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2102 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2116 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2118 struct sock
*sk
= sock
->sk
;
2119 struct bt_security sec
;
2122 BT_DBG("sk %p", sk
);
2124 if (level
== SOL_L2CAP
)
2125 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2127 if (level
!= SOL_BLUETOOTH
)
2128 return -ENOPROTOOPT
;
2130 if (get_user(len
, optlen
))
2137 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2138 && sk
->sk_type
!= SOCK_RAW
) {
2143 sec
.level
= l2cap_pi(sk
)->sec_level
;
2145 len
= min_t(unsigned int, len
, sizeof(sec
));
2146 if (copy_to_user(optval
, (char *) &sec
, len
))
2151 case BT_DEFER_SETUP
:
2152 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2157 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2171 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2173 struct sock
*sk
= sock
->sk
;
2176 BT_DBG("sock %p, sk %p", sock
, sk
);
2182 if (!sk
->sk_shutdown
) {
2183 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2184 err
= __l2cap_wait_ack(sk
);
2186 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2187 l2cap_sock_clear_timer(sk
);
2188 __l2cap_sock_close(sk
, 0);
2190 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2191 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2198 static int l2cap_sock_release(struct socket
*sock
)
2200 struct sock
*sk
= sock
->sk
;
2203 BT_DBG("sock %p, sk %p", sock
, sk
);
2208 err
= l2cap_sock_shutdown(sock
, 2);
2211 l2cap_sock_kill(sk
);
2215 static void l2cap_chan_ready(struct sock
*sk
)
2217 struct sock
*parent
= bt_sk(sk
)->parent
;
2219 BT_DBG("sk %p, parent %p", sk
, parent
);
2221 l2cap_pi(sk
)->conf_state
= 0;
2222 l2cap_sock_clear_timer(sk
);
2225 /* Outgoing channel.
2226 * Wake up socket sleeping on connect.
2228 sk
->sk_state
= BT_CONNECTED
;
2229 sk
->sk_state_change(sk
);
2231 /* Incoming channel.
2232 * Wake up socket sleeping on accept.
2234 parent
->sk_data_ready(parent
, 0);
2238 /* Copy frame to all raw sockets on that connection */
2239 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2241 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2242 struct sk_buff
*nskb
;
2245 BT_DBG("conn %p", conn
);
2247 read_lock(&l
->lock
);
2248 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2249 if (sk
->sk_type
!= SOCK_RAW
)
2252 /* Don't send frame to the socket it came from */
2255 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2259 if (sock_queue_rcv_skb(sk
, nskb
))
2262 read_unlock(&l
->lock
);
2265 /* ---- L2CAP signalling commands ---- */
2266 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2267 u8 code
, u8 ident
, u16 dlen
, void *data
)
2269 struct sk_buff
*skb
, **frag
;
2270 struct l2cap_cmd_hdr
*cmd
;
2271 struct l2cap_hdr
*lh
;
2274 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2275 conn
, code
, ident
, dlen
);
2277 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2278 count
= min_t(unsigned int, conn
->mtu
, len
);
2280 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2284 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2285 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2286 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2288 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2291 cmd
->len
= cpu_to_le16(dlen
);
2294 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2295 memcpy(skb_put(skb
, count
), data
, count
);
2301 /* Continuation fragments (no L2CAP header) */
2302 frag
= &skb_shinfo(skb
)->frag_list
;
2304 count
= min_t(unsigned int, conn
->mtu
, len
);
2306 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2310 memcpy(skb_put(*frag
, count
), data
, count
);
2315 frag
= &(*frag
)->next
;
2325 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2327 struct l2cap_conf_opt
*opt
= *ptr
;
2330 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2338 *val
= *((u8
*) opt
->val
);
2342 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2346 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2350 *val
= (unsigned long) opt
->val
;
2354 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2358 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2360 struct l2cap_conf_opt
*opt
= *ptr
;
2362 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2369 *((u8
*) opt
->val
) = val
;
2373 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2377 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2381 memcpy(opt
->val
, (void *) val
, len
);
2385 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
/* Ack timer expiry: flush a pending acknowledgement for the channel. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2397 static inline void l2cap_ertm_init(struct sock
*sk
)
2399 l2cap_pi(sk
)->expected_ack_seq
= 0;
2400 l2cap_pi(sk
)->unacked_frames
= 0;
2401 l2cap_pi(sk
)->buffer_seq
= 0;
2402 l2cap_pi(sk
)->num_acked
= 0;
2403 l2cap_pi(sk
)->frames_sent
= 0;
2405 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2406 l2cap_retrans_timeout
, (unsigned long) sk
);
2407 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2408 l2cap_monitor_timeout
, (unsigned long) sk
);
2409 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2410 l2cap_ack_timeout
, (unsigned long) sk
);
2412 __skb_queue_head_init(SREJ_QUEUE(sk
));
2413 __skb_queue_head_init(BUSY_QUEUE(sk
));
2414 spin_lock_init(&l2cap_pi(sk
)->send_lock
);
2416 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2419 static int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
2421 u32 local_feat_mask
= l2cap_feat_mask
;
2423 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
2426 case L2CAP_MODE_ERTM
:
2427 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
2428 case L2CAP_MODE_STREAMING
:
2429 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
2435 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2438 case L2CAP_MODE_STREAMING
:
2439 case L2CAP_MODE_ERTM
:
2440 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2444 return L2CAP_MODE_BASIC
;
2448 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2450 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2451 struct l2cap_conf_req
*req
= data
;
2452 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2453 void *ptr
= req
->data
;
2455 BT_DBG("sk %p", sk
);
2457 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2461 case L2CAP_MODE_STREAMING
:
2462 case L2CAP_MODE_ERTM
:
2463 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2464 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2465 l2cap_send_disconn_req(pi
->conn
, sk
);
2468 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2474 case L2CAP_MODE_BASIC
:
2475 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2476 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2479 case L2CAP_MODE_ERTM
:
2480 rfc
.mode
= L2CAP_MODE_ERTM
;
2481 rfc
.txwin_size
= pi
->tx_win
;
2482 rfc
.max_transmit
= pi
->max_tx
;
2483 rfc
.retrans_timeout
= 0;
2484 rfc
.monitor_timeout
= 0;
2485 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2486 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2487 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2489 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2490 sizeof(rfc
), (unsigned long) &rfc
);
2492 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2495 if (pi
->fcs
== L2CAP_FCS_NONE
||
2496 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2497 pi
->fcs
= L2CAP_FCS_NONE
;
2498 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2502 case L2CAP_MODE_STREAMING
:
2503 rfc
.mode
= L2CAP_MODE_STREAMING
;
2505 rfc
.max_transmit
= 0;
2506 rfc
.retrans_timeout
= 0;
2507 rfc
.monitor_timeout
= 0;
2508 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2509 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2510 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2512 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2513 sizeof(rfc
), (unsigned long) &rfc
);
2515 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2518 if (pi
->fcs
== L2CAP_FCS_NONE
||
2519 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2520 pi
->fcs
= L2CAP_FCS_NONE
;
2521 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2526 /* FIXME: Need actual value of the flush timeout */
2527 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2528 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2530 req
->dcid
= cpu_to_le16(pi
->dcid
);
2531 req
->flags
= cpu_to_le16(0);
2536 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2538 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2539 struct l2cap_conf_rsp
*rsp
= data
;
2540 void *ptr
= rsp
->data
;
2541 void *req
= pi
->conf_req
;
2542 int len
= pi
->conf_len
;
2543 int type
, hint
, olen
;
2545 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2546 u16 mtu
= L2CAP_DEFAULT_MTU
;
2547 u16 result
= L2CAP_CONF_SUCCESS
;
2549 BT_DBG("sk %p", sk
);
2551 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2552 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2554 hint
= type
& L2CAP_CONF_HINT
;
2555 type
&= L2CAP_CONF_MASK
;
2558 case L2CAP_CONF_MTU
:
2562 case L2CAP_CONF_FLUSH_TO
:
2566 case L2CAP_CONF_QOS
:
2569 case L2CAP_CONF_RFC
:
2570 if (olen
== sizeof(rfc
))
2571 memcpy(&rfc
, (void *) val
, olen
);
2574 case L2CAP_CONF_FCS
:
2575 if (val
== L2CAP_FCS_NONE
)
2576 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2584 result
= L2CAP_CONF_UNKNOWN
;
2585 *((u8
*) ptr
++) = type
;
2590 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2594 case L2CAP_MODE_STREAMING
:
2595 case L2CAP_MODE_ERTM
:
2596 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
2597 if (!l2cap_mode_supported(pi
->mode
, pi
->conn
->feat_mask
))
2598 return -ECONNREFUSED
;
2601 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2606 if (pi
->mode
!= rfc
.mode
) {
2607 result
= L2CAP_CONF_UNACCEPT
;
2608 rfc
.mode
= pi
->mode
;
2610 if (pi
->num_conf_rsp
== 1)
2611 return -ECONNREFUSED
;
2613 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2614 sizeof(rfc
), (unsigned long) &rfc
);
2618 if (result
== L2CAP_CONF_SUCCESS
) {
2619 /* Configure output options and let the other side know
2620 * which ones we don't like. */
2622 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2623 result
= L2CAP_CONF_UNACCEPT
;
2626 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2628 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2631 case L2CAP_MODE_BASIC
:
2632 pi
->fcs
= L2CAP_FCS_NONE
;
2633 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2636 case L2CAP_MODE_ERTM
:
2637 pi
->remote_tx_win
= rfc
.txwin_size
;
2638 pi
->remote_max_tx
= rfc
.max_transmit
;
2639 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2640 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2642 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2644 rfc
.retrans_timeout
=
2645 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2646 rfc
.monitor_timeout
=
2647 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2649 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2651 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2652 sizeof(rfc
), (unsigned long) &rfc
);
2656 case L2CAP_MODE_STREAMING
:
2657 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2658 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2660 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2662 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2664 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2665 sizeof(rfc
), (unsigned long) &rfc
);
2670 result
= L2CAP_CONF_UNACCEPT
;
2672 memset(&rfc
, 0, sizeof(rfc
));
2673 rfc
.mode
= pi
->mode
;
2676 if (result
== L2CAP_CONF_SUCCESS
)
2677 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2679 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2680 rsp
->result
= cpu_to_le16(result
);
2681 rsp
->flags
= cpu_to_le16(0x0000);
2686 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2688 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2689 struct l2cap_conf_req
*req
= data
;
2690 void *ptr
= req
->data
;
2693 struct l2cap_conf_rfc rfc
;
2695 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2697 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2698 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2701 case L2CAP_CONF_MTU
:
2702 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2703 *result
= L2CAP_CONF_UNACCEPT
;
2704 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2707 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2710 case L2CAP_CONF_FLUSH_TO
:
2712 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2716 case L2CAP_CONF_RFC
:
2717 if (olen
== sizeof(rfc
))
2718 memcpy(&rfc
, (void *)val
, olen
);
2720 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2721 rfc
.mode
!= pi
->mode
)
2722 return -ECONNREFUSED
;
2724 pi
->mode
= rfc
.mode
;
2727 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2728 sizeof(rfc
), (unsigned long) &rfc
);
2733 if (*result
== L2CAP_CONF_SUCCESS
) {
2735 case L2CAP_MODE_ERTM
:
2736 pi
->remote_tx_win
= rfc
.txwin_size
;
2737 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2738 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2739 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2741 case L2CAP_MODE_STREAMING
:
2742 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2746 req
->dcid
= cpu_to_le16(pi
->dcid
);
2747 req
->flags
= cpu_to_le16(0x0000);
2752 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2754 struct l2cap_conf_rsp
*rsp
= data
;
2755 void *ptr
= rsp
->data
;
2757 BT_DBG("sk %p", sk
);
2759 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2760 rsp
->result
= cpu_to_le16(result
);
2761 rsp
->flags
= cpu_to_le16(flags
);
2766 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2768 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2771 struct l2cap_conf_rfc rfc
;
2773 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2775 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2778 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2779 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2782 case L2CAP_CONF_RFC
:
2783 if (olen
== sizeof(rfc
))
2784 memcpy(&rfc
, (void *)val
, olen
);
2791 case L2CAP_MODE_ERTM
:
2792 pi
->remote_tx_win
= rfc
.txwin_size
;
2793 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2794 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2795 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2797 case L2CAP_MODE_STREAMING
:
2798 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2802 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2804 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2806 if (rej
->reason
!= 0x0000)
2809 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2810 cmd
->ident
== conn
->info_ident
) {
2811 del_timer(&conn
->info_timer
);
2813 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2814 conn
->info_ident
= 0;
2816 l2cap_conn_start(conn
);
2822 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2824 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2825 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2826 struct l2cap_conn_rsp rsp
;
2827 struct sock
*sk
, *parent
;
2828 int result
, status
= L2CAP_CS_NO_INFO
;
2830 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2831 __le16 psm
= req
->psm
;
2833 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2835 /* Check if we have socket listening on psm */
2836 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2838 result
= L2CAP_CR_BAD_PSM
;
2842 /* Check if the ACL is secure enough (if not SDP) */
2843 if (psm
!= cpu_to_le16(0x0001) &&
2844 !hci_conn_check_link_mode(conn
->hcon
)) {
2845 conn
->disc_reason
= 0x05;
2846 result
= L2CAP_CR_SEC_BLOCK
;
2850 result
= L2CAP_CR_NO_MEM
;
2852 /* Check for backlog size */
2853 if (sk_acceptq_is_full(parent
)) {
2854 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2858 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2862 write_lock_bh(&list
->lock
);
2864 /* Check if we already have channel with that dcid */
2865 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2866 write_unlock_bh(&list
->lock
);
2867 sock_set_flag(sk
, SOCK_ZAPPED
);
2868 l2cap_sock_kill(sk
);
2872 hci_conn_hold(conn
->hcon
);
2874 l2cap_sock_init(sk
, parent
);
2875 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2876 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2877 l2cap_pi(sk
)->psm
= psm
;
2878 l2cap_pi(sk
)->dcid
= scid
;
2880 __l2cap_chan_add(conn
, sk
, parent
);
2881 dcid
= l2cap_pi(sk
)->scid
;
2883 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2885 l2cap_pi(sk
)->ident
= cmd
->ident
;
2887 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2888 if (l2cap_check_security(sk
)) {
2889 if (bt_sk(sk
)->defer_setup
) {
2890 sk
->sk_state
= BT_CONNECT2
;
2891 result
= L2CAP_CR_PEND
;
2892 status
= L2CAP_CS_AUTHOR_PEND
;
2893 parent
->sk_data_ready(parent
, 0);
2895 sk
->sk_state
= BT_CONFIG
;
2896 result
= L2CAP_CR_SUCCESS
;
2897 status
= L2CAP_CS_NO_INFO
;
2900 sk
->sk_state
= BT_CONNECT2
;
2901 result
= L2CAP_CR_PEND
;
2902 status
= L2CAP_CS_AUTHEN_PEND
;
2905 sk
->sk_state
= BT_CONNECT2
;
2906 result
= L2CAP_CR_PEND
;
2907 status
= L2CAP_CS_NO_INFO
;
2910 write_unlock_bh(&list
->lock
);
2913 bh_unlock_sock(parent
);
2916 rsp
.scid
= cpu_to_le16(scid
);
2917 rsp
.dcid
= cpu_to_le16(dcid
);
2918 rsp
.result
= cpu_to_le16(result
);
2919 rsp
.status
= cpu_to_le16(status
);
2920 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2922 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2923 struct l2cap_info_req info
;
2924 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2926 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2927 conn
->info_ident
= l2cap_get_ident(conn
);
2929 mod_timer(&conn
->info_timer
, jiffies
+
2930 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2932 l2cap_send_cmd(conn
, conn
->info_ident
,
2933 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2939 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2941 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2942 u16 scid
, dcid
, result
, status
;
2946 scid
= __le16_to_cpu(rsp
->scid
);
2947 dcid
= __le16_to_cpu(rsp
->dcid
);
2948 result
= __le16_to_cpu(rsp
->result
);
2949 status
= __le16_to_cpu(rsp
->status
);
2951 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2954 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2958 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2964 case L2CAP_CR_SUCCESS
:
2965 sk
->sk_state
= BT_CONFIG
;
2966 l2cap_pi(sk
)->ident
= 0;
2967 l2cap_pi(sk
)->dcid
= dcid
;
2968 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2969 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2971 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2972 l2cap_build_conf_req(sk
, req
), req
);
2973 l2cap_pi(sk
)->num_conf_req
++;
2977 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2981 l2cap_chan_del(sk
, ECONNREFUSED
);
2989 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2991 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2997 dcid
= __le16_to_cpu(req
->dcid
);
2998 flags
= __le16_to_cpu(req
->flags
);
3000 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3002 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3006 if (sk
->sk_state
== BT_DISCONN
)
3009 /* Reject if config buffer is too small. */
3010 len
= cmd_len
- sizeof(*req
);
3011 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3012 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3013 l2cap_build_conf_rsp(sk
, rsp
,
3014 L2CAP_CONF_REJECT
, flags
), rsp
);
3019 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3020 l2cap_pi(sk
)->conf_len
+= len
;
3022 if (flags
& 0x0001) {
3023 /* Incomplete config. Send empty response. */
3024 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3025 l2cap_build_conf_rsp(sk
, rsp
,
3026 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3030 /* Complete config. */
3031 len
= l2cap_parse_conf_req(sk
, rsp
);
3033 l2cap_send_disconn_req(conn
, sk
);
3037 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3038 l2cap_pi(sk
)->num_conf_rsp
++;
3040 /* Reset config buffer. */
3041 l2cap_pi(sk
)->conf_len
= 0;
3043 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3046 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3047 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3048 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3049 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3051 sk
->sk_state
= BT_CONNECTED
;
3053 l2cap_pi(sk
)->next_tx_seq
= 0;
3054 l2cap_pi(sk
)->expected_tx_seq
= 0;
3055 __skb_queue_head_init(TX_QUEUE(sk
));
3056 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3057 l2cap_ertm_init(sk
);
3059 l2cap_chan_ready(sk
);
3063 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3065 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3066 l2cap_build_conf_req(sk
, buf
), buf
);
3067 l2cap_pi(sk
)->num_conf_req
++;
3075 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3077 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3078 u16 scid
, flags
, result
;
3080 int len
= cmd
->len
- sizeof(*rsp
);
3082 scid
= __le16_to_cpu(rsp
->scid
);
3083 flags
= __le16_to_cpu(rsp
->flags
);
3084 result
= __le16_to_cpu(rsp
->result
);
3086 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3087 scid
, flags
, result
);
3089 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3094 case L2CAP_CONF_SUCCESS
:
3095 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3098 case L2CAP_CONF_UNACCEPT
:
3099 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3102 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3103 l2cap_send_disconn_req(conn
, sk
);
3107 /* throw out any old stored conf requests */
3108 result
= L2CAP_CONF_SUCCESS
;
3109 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3112 l2cap_send_disconn_req(conn
, sk
);
3116 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3117 L2CAP_CONF_REQ
, len
, req
);
3118 l2cap_pi(sk
)->num_conf_req
++;
3119 if (result
!= L2CAP_CONF_SUCCESS
)
3125 sk
->sk_err
= ECONNRESET
;
3126 l2cap_sock_set_timer(sk
, HZ
* 5);
3127 l2cap_send_disconn_req(conn
, sk
);
3134 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3136 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3137 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3138 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3139 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3141 sk
->sk_state
= BT_CONNECTED
;
3142 l2cap_pi(sk
)->next_tx_seq
= 0;
3143 l2cap_pi(sk
)->expected_tx_seq
= 0;
3144 __skb_queue_head_init(TX_QUEUE(sk
));
3145 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3146 l2cap_ertm_init(sk
);
3148 l2cap_chan_ready(sk
);
3156 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3158 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3159 struct l2cap_disconn_rsp rsp
;
3163 scid
= __le16_to_cpu(req
->scid
);
3164 dcid
= __le16_to_cpu(req
->dcid
);
3166 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3168 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3172 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3173 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3174 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3176 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3178 l2cap_chan_del(sk
, ECONNRESET
);
3181 l2cap_sock_kill(sk
);
3185 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3187 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3191 scid
= __le16_to_cpu(rsp
->scid
);
3192 dcid
= __le16_to_cpu(rsp
->dcid
);
3194 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3196 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3200 l2cap_chan_del(sk
, 0);
3203 l2cap_sock_kill(sk
);
3207 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3209 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3212 type
= __le16_to_cpu(req
->type
);
3214 BT_DBG("type 0x%4.4x", type
);
3216 if (type
== L2CAP_IT_FEAT_MASK
) {
3218 u32 feat_mask
= l2cap_feat_mask
;
3219 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3220 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3221 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3223 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3225 put_unaligned_le32(feat_mask
, rsp
->data
);
3226 l2cap_send_cmd(conn
, cmd
->ident
,
3227 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3228 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3230 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3231 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3232 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3233 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3234 l2cap_send_cmd(conn
, cmd
->ident
,
3235 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3237 struct l2cap_info_rsp rsp
;
3238 rsp
.type
= cpu_to_le16(type
);
3239 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3240 l2cap_send_cmd(conn
, cmd
->ident
,
3241 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3247 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3249 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3252 type
= __le16_to_cpu(rsp
->type
);
3253 result
= __le16_to_cpu(rsp
->result
);
3255 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3257 del_timer(&conn
->info_timer
);
3259 if (type
== L2CAP_IT_FEAT_MASK
) {
3260 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3262 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3263 struct l2cap_info_req req
;
3264 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3266 conn
->info_ident
= l2cap_get_ident(conn
);
3268 l2cap_send_cmd(conn
, conn
->info_ident
,
3269 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3271 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3272 conn
->info_ident
= 0;
3274 l2cap_conn_start(conn
);
3276 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3277 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3278 conn
->info_ident
= 0;
3280 l2cap_conn_start(conn
);
3286 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3288 u8
*data
= skb
->data
;
3290 struct l2cap_cmd_hdr cmd
;
3293 l2cap_raw_recv(conn
, skb
);
3295 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3297 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3298 data
+= L2CAP_CMD_HDR_SIZE
;
3299 len
-= L2CAP_CMD_HDR_SIZE
;
3301 cmd_len
= le16_to_cpu(cmd
.len
);
3303 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3305 if (cmd_len
> len
|| !cmd
.ident
) {
3306 BT_DBG("corrupted command");
3311 case L2CAP_COMMAND_REJ
:
3312 l2cap_command_rej(conn
, &cmd
, data
);
3315 case L2CAP_CONN_REQ
:
3316 err
= l2cap_connect_req(conn
, &cmd
, data
);
3319 case L2CAP_CONN_RSP
:
3320 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3323 case L2CAP_CONF_REQ
:
3324 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3327 case L2CAP_CONF_RSP
:
3328 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3331 case L2CAP_DISCONN_REQ
:
3332 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3335 case L2CAP_DISCONN_RSP
:
3336 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3339 case L2CAP_ECHO_REQ
:
3340 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3343 case L2CAP_ECHO_RSP
:
3346 case L2CAP_INFO_REQ
:
3347 err
= l2cap_information_req(conn
, &cmd
, data
);
3350 case L2CAP_INFO_RSP
:
3351 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3355 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3361 struct l2cap_cmd_rej rej
;
3362 BT_DBG("error %d", err
);
3364 /* FIXME: Map err to a valid reason */
3365 rej
.reason
= cpu_to_le16(0);
3366 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3376 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3378 u16 our_fcs
, rcv_fcs
;
3379 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3381 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3382 skb_trim(skb
, skb
->len
- 2);
3383 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3384 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3386 if (our_fcs
!= rcv_fcs
)
3392 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3394 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3397 pi
->frames_sent
= 0;
3399 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3401 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3402 control
|= L2CAP_SUPER_RCV_NOT_READY
| L2CAP_CTRL_FINAL
;
3403 l2cap_send_sframe(pi
, control
);
3404 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3405 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
3408 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&& pi
->unacked_frames
> 0)
3409 __mod_retrans_timer();
3411 spin_lock_bh(&pi
->send_lock
);
3412 l2cap_ertm_send(sk
);
3413 spin_unlock_bh(&pi
->send_lock
);
3415 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3416 pi
->frames_sent
== 0) {
3417 control
|= L2CAP_SUPER_RCV_READY
;
3418 l2cap_send_sframe(pi
, control
);
3422 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3424 struct sk_buff
*next_skb
;
3425 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3426 int tx_seq_offset
, next_tx_seq_offset
;
3428 bt_cb(skb
)->tx_seq
= tx_seq
;
3429 bt_cb(skb
)->sar
= sar
;
3431 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3433 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3437 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3438 if (tx_seq_offset
< 0)
3439 tx_seq_offset
+= 64;
3442 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3445 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3446 pi
->buffer_seq
) % 64;
3447 if (next_tx_seq_offset
< 0)
3448 next_tx_seq_offset
+= 64;
3450 if (next_tx_seq_offset
> tx_seq_offset
) {
3451 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3455 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3458 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3460 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3465 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3467 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3468 struct sk_buff
*_skb
;
3471 switch (control
& L2CAP_CTRL_SAR
) {
3472 case L2CAP_SDU_UNSEGMENTED
:
3473 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3476 err
= sock_queue_rcv_skb(sk
, skb
);
3482 case L2CAP_SDU_START
:
3483 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3486 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3488 if (pi
->sdu_len
> pi
->imtu
)
3491 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3495 /* pull sdu_len bytes only after alloc, because of Local Busy
3496 * condition we have to be sure that this will be executed
3497 * only once, i.e., when alloc does not fail */
3500 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3502 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3503 pi
->partial_sdu_len
= skb
->len
;
3506 case L2CAP_SDU_CONTINUE
:
3507 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3513 pi
->partial_sdu_len
+= skb
->len
;
3514 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3517 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3522 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3528 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3529 pi
->partial_sdu_len
+= skb
->len
;
3531 if (pi
->partial_sdu_len
> pi
->imtu
)
3534 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3537 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3540 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3542 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3546 err
= sock_queue_rcv_skb(sk
, _skb
);
3549 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3553 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3554 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3568 l2cap_send_disconn_req(pi
->conn
, sk
);
3573 static void l2cap_busy_work(struct work_struct
*work
)
3575 DECLARE_WAITQUEUE(wait
, current
);
3576 struct l2cap_pinfo
*pi
=
3577 container_of(work
, struct l2cap_pinfo
, busy_work
);
3578 struct sock
*sk
= (struct sock
*)pi
;
3579 int n_tries
= 0, timeo
= HZ
/5, err
;
3580 struct sk_buff
*skb
;
3585 add_wait_queue(sk_sleep(sk
), &wait
);
3586 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3587 set_current_state(TASK_INTERRUPTIBLE
);
3589 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3591 l2cap_send_disconn_req(pi
->conn
, sk
);
3598 if (signal_pending(current
)) {
3599 err
= sock_intr_errno(timeo
);
3604 timeo
= schedule_timeout(timeo
);
3607 err
= sock_error(sk
);
3611 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3612 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3613 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3615 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3619 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3626 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3629 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3630 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3631 l2cap_send_sframe(pi
, control
);
3632 l2cap_pi(sk
)->retry_count
= 1;
3634 del_timer(&pi
->retrans_timer
);
3635 __mod_monitor_timer();
3637 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3640 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3641 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3643 set_current_state(TASK_RUNNING
);
3644 remove_wait_queue(sk_sleep(sk
), &wait
);
3649 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3651 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3654 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3655 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3656 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3660 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3662 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3666 /* Busy Condition */
3667 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3668 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3669 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3671 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3672 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3673 l2cap_send_sframe(pi
, sctrl
);
3675 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3677 del_timer(&pi
->ack_timer
);
3679 queue_work(_busy_wq
, &pi
->busy_work
);
3684 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3686 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3687 struct sk_buff
*_skb
;
3691 * TODO: We have to notify the userland if some data is lost with the
3695 switch (control
& L2CAP_CTRL_SAR
) {
3696 case L2CAP_SDU_UNSEGMENTED
:
3697 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3702 err
= sock_queue_rcv_skb(sk
, skb
);
3708 case L2CAP_SDU_START
:
3709 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3714 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3717 if (pi
->sdu_len
> pi
->imtu
) {
3722 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3728 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3730 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3731 pi
->partial_sdu_len
= skb
->len
;
3735 case L2CAP_SDU_CONTINUE
:
3736 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3739 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3741 pi
->partial_sdu_len
+= skb
->len
;
3742 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3750 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3753 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3755 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3756 pi
->partial_sdu_len
+= skb
->len
;
3758 if (pi
->partial_sdu_len
> pi
->imtu
)
3761 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3762 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3763 err
= sock_queue_rcv_skb(sk
, _skb
);
3778 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3780 struct sk_buff
*skb
;
3783 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3784 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3787 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3788 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3789 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3790 l2cap_pi(sk
)->buffer_seq_srej
=
3791 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3792 tx_seq
= (tx_seq
+ 1) % 64;
3796 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3798 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3799 struct srej_list
*l
, *tmp
;
3802 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3803 if (l
->tx_seq
== tx_seq
) {
3808 control
= L2CAP_SUPER_SELECT_REJECT
;
3809 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3810 l2cap_send_sframe(pi
, control
);
3812 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3816 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3818 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3819 struct srej_list
*new;
3822 while (tx_seq
!= pi
->expected_tx_seq
) {
3823 control
= L2CAP_SUPER_SELECT_REJECT
;
3824 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3825 l2cap_send_sframe(pi
, control
);
3827 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3828 new->tx_seq
= pi
->expected_tx_seq
;
3829 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3830 list_add_tail(&new->list
, SREJ_LIST(sk
));
3832 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3835 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3837 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3838 u8 tx_seq
= __get_txseq(rx_control
);
3839 u8 req_seq
= __get_reqseq(rx_control
);
3840 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3841 int tx_seq_offset
, expected_tx_seq_offset
;
3842 int num_to_ack
= (pi
->tx_win
/6) + 1;
3845 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3847 if (L2CAP_CTRL_FINAL
& rx_control
&&
3848 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3849 del_timer(&pi
->monitor_timer
);
3850 if (pi
->unacked_frames
> 0)
3851 __mod_retrans_timer();
3852 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3855 pi
->expected_ack_seq
= req_seq
;
3856 l2cap_drop_acked_frames(sk
);
3858 if (tx_seq
== pi
->expected_tx_seq
)
3861 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3862 if (tx_seq_offset
< 0)
3863 tx_seq_offset
+= 64;
3865 /* invalid tx_seq */
3866 if (tx_seq_offset
>= pi
->tx_win
) {
3867 l2cap_send_disconn_req(pi
->conn
, sk
);
3871 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3874 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3875 struct srej_list
*first
;
3877 first
= list_first_entry(SREJ_LIST(sk
),
3878 struct srej_list
, list
);
3879 if (tx_seq
== first
->tx_seq
) {
3880 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3881 l2cap_check_srej_gap(sk
, tx_seq
);
3883 list_del(&first
->list
);
3886 if (list_empty(SREJ_LIST(sk
))) {
3887 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3888 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3892 struct srej_list
*l
;
3894 /* duplicated tx_seq */
3895 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3898 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3899 if (l
->tx_seq
== tx_seq
) {
3900 l2cap_resend_srejframe(sk
, tx_seq
);
3904 l2cap_send_srejframe(sk
, tx_seq
);
3907 expected_tx_seq_offset
=
3908 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3909 if (expected_tx_seq_offset
< 0)
3910 expected_tx_seq_offset
+= 64;
3912 /* duplicated tx_seq */
3913 if (tx_seq_offset
< expected_tx_seq_offset
)
3916 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3918 INIT_LIST_HEAD(SREJ_LIST(sk
));
3919 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3921 __skb_queue_head_init(SREJ_QUEUE(sk
));
3922 __skb_queue_head_init(BUSY_QUEUE(sk
));
3923 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3925 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3927 l2cap_send_srejframe(sk
, tx_seq
);
3929 del_timer(&pi
->ack_timer
);
3934 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3936 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3937 bt_cb(skb
)->tx_seq
= tx_seq
;
3938 bt_cb(skb
)->sar
= sar
;
3939 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3943 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3947 if (rx_control
& L2CAP_CTRL_FINAL
) {
3948 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3949 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3951 l2cap_retransmit_frames(sk
);
3956 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3957 if (pi
->num_acked
== num_to_ack
- 1)
3967 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
3969 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3971 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3972 l2cap_drop_acked_frames(sk
);
3974 if (rx_control
& L2CAP_CTRL_POLL
) {
3975 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3976 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3977 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3978 (pi
->unacked_frames
> 0))
3979 __mod_retrans_timer();
3981 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3982 l2cap_send_srejtail(sk
);
3984 l2cap_send_i_or_rr_or_rnr(sk
);
3987 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3988 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3990 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3991 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3993 l2cap_retransmit_frames(sk
);
3996 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3997 (pi
->unacked_frames
> 0))
3998 __mod_retrans_timer();
4000 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4001 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4004 spin_lock_bh(&pi
->send_lock
);
4005 l2cap_ertm_send(sk
);
4006 spin_unlock_bh(&pi
->send_lock
);
4011 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4013 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4014 u8 tx_seq
= __get_reqseq(rx_control
);
4016 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4018 pi
->expected_ack_seq
= tx_seq
;
4019 l2cap_drop_acked_frames(sk
);
4021 if (rx_control
& L2CAP_CTRL_FINAL
) {
4022 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4023 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4025 l2cap_retransmit_frames(sk
);
4027 l2cap_retransmit_frames(sk
);
4029 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4030 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4033 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4035 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4036 u8 tx_seq
= __get_reqseq(rx_control
);
4038 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4040 if (rx_control
& L2CAP_CTRL_POLL
) {
4041 pi
->expected_ack_seq
= tx_seq
;
4042 l2cap_drop_acked_frames(sk
);
4044 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4045 l2cap_retransmit_one_frame(sk
, tx_seq
);
4047 spin_lock_bh(&pi
->send_lock
);
4048 l2cap_ertm_send(sk
);
4049 spin_unlock_bh(&pi
->send_lock
);
4051 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4052 pi
->srej_save_reqseq
= tx_seq
;
4053 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4055 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4056 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4057 pi
->srej_save_reqseq
== tx_seq
)
4058 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4060 l2cap_retransmit_one_frame(sk
, tx_seq
);
4062 l2cap_retransmit_one_frame(sk
, tx_seq
);
4063 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4064 pi
->srej_save_reqseq
= tx_seq
;
4065 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4070 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4072 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4073 u8 tx_seq
= __get_reqseq(rx_control
);
4075 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4076 pi
->expected_ack_seq
= tx_seq
;
4077 l2cap_drop_acked_frames(sk
);
4079 if (rx_control
& L2CAP_CTRL_POLL
)
4080 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4082 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4083 del_timer(&pi
->retrans_timer
);
4084 if (rx_control
& L2CAP_CTRL_POLL
)
4085 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4089 if (rx_control
& L2CAP_CTRL_POLL
)
4090 l2cap_send_srejtail(sk
);
4092 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4095 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4097 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4099 if (L2CAP_CTRL_FINAL
& rx_control
&&
4100 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4101 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4102 if (l2cap_pi(sk
)->unacked_frames
> 0)
4103 __mod_retrans_timer();
4104 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4107 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4108 case L2CAP_SUPER_RCV_READY
:
4109 l2cap_data_channel_rrframe(sk
, rx_control
);
4112 case L2CAP_SUPER_REJECT
:
4113 l2cap_data_channel_rejframe(sk
, rx_control
);
4116 case L2CAP_SUPER_SELECT_REJECT
:
4117 l2cap_data_channel_srejframe(sk
, rx_control
);
4120 case L2CAP_SUPER_RCV_NOT_READY
:
4121 l2cap_data_channel_rnrframe(sk
, rx_control
);
4129 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4132 struct l2cap_pinfo
*pi
;
4135 int len
, next_tx_seq_offset
, req_seq_offset
;
4137 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4139 BT_DBG("unknown cid 0x%4.4x", cid
);
4145 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4147 if (sk
->sk_state
!= BT_CONNECTED
)
4151 case L2CAP_MODE_BASIC
:
4152 /* If socket recv buffers overflows we drop data here
4153 * which is *bad* because L2CAP has to be reliable.
4154 * But we don't have any other choice. L2CAP doesn't
4155 * provide flow control mechanism. */
4157 if (pi
->imtu
< skb
->len
)
4160 if (!sock_queue_rcv_skb(sk
, skb
))
4164 case L2CAP_MODE_ERTM
:
4165 control
= get_unaligned_le16(skb
->data
);
4169 if (__is_sar_start(control
) && __is_iframe(control
))
4172 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4176 * We can just drop the corrupted I-frame here.
4177 * Receiver will miss it and start proper recovery
4178 * procedures and ask retransmission.
4180 if (len
> pi
->mps
) {
4181 l2cap_send_disconn_req(pi
->conn
, sk
);
4185 if (l2cap_check_fcs(pi
, skb
))
4188 req_seq
= __get_reqseq(control
);
4189 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4190 if (req_seq_offset
< 0)
4191 req_seq_offset
+= 64;
4193 next_tx_seq_offset
=
4194 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4195 if (next_tx_seq_offset
< 0)
4196 next_tx_seq_offset
+= 64;
4198 /* check for invalid req-seq */
4199 if (req_seq_offset
> next_tx_seq_offset
) {
4200 l2cap_send_disconn_req(pi
->conn
, sk
);
4204 if (__is_iframe(control
)) {
4206 l2cap_send_disconn_req(pi
->conn
, sk
);
4210 l2cap_data_channel_iframe(sk
, control
, skb
);
4213 l2cap_send_disconn_req(pi
->conn
, sk
);
4217 l2cap_data_channel_sframe(sk
, control
, skb
);
4222 case L2CAP_MODE_STREAMING
:
4223 control
= get_unaligned_le16(skb
->data
);
4227 if (__is_sar_start(control
))
4230 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4233 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4236 if (l2cap_check_fcs(pi
, skb
))
4239 tx_seq
= __get_txseq(control
);
4241 if (pi
->expected_tx_seq
== tx_seq
)
4242 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4244 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4246 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4251 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4265 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4269 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4273 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4275 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4278 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4281 if (!sock_queue_rcv_skb(sk
, skb
))
4293 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4295 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4299 skb_pull(skb
, L2CAP_HDR_SIZE
);
4300 cid
= __le16_to_cpu(lh
->cid
);
4301 len
= __le16_to_cpu(lh
->len
);
4303 if (len
!= skb
->len
) {
4308 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4311 case L2CAP_CID_SIGNALING
:
4312 l2cap_sig_channel(conn
, skb
);
4315 case L2CAP_CID_CONN_LESS
:
4316 psm
= get_unaligned_le16(skb
->data
);
4318 l2cap_conless_channel(conn
, psm
, skb
);
4322 l2cap_data_channel(conn
, cid
, skb
);
4327 /* ---- L2CAP interface with lower layer (HCI) ---- */
4329 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4331 int exact
= 0, lm1
= 0, lm2
= 0;
4332 register struct sock
*sk
;
4333 struct hlist_node
*node
;
4335 if (type
!= ACL_LINK
)
4338 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4340 /* Find listening sockets and check their link_mode */
4341 read_lock(&l2cap_sk_list
.lock
);
4342 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4343 if (sk
->sk_state
!= BT_LISTEN
)
4346 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4347 lm1
|= HCI_LM_ACCEPT
;
4348 if (l2cap_pi(sk
)->role_switch
)
4349 lm1
|= HCI_LM_MASTER
;
4351 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4352 lm2
|= HCI_LM_ACCEPT
;
4353 if (l2cap_pi(sk
)->role_switch
)
4354 lm2
|= HCI_LM_MASTER
;
4357 read_unlock(&l2cap_sk_list
.lock
);
4359 return exact
? lm1
: lm2
;
4362 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4364 struct l2cap_conn
*conn
;
4366 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4368 if (hcon
->type
!= ACL_LINK
)
4372 conn
= l2cap_conn_add(hcon
, status
);
4374 l2cap_conn_ready(conn
);
4376 l2cap_conn_del(hcon
, bt_err(status
));
4381 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4383 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4385 BT_DBG("hcon %p", hcon
);
4387 if (hcon
->type
!= ACL_LINK
|| !conn
)
4390 return conn
->disc_reason
;
4393 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4395 BT_DBG("hcon %p reason %d", hcon
, reason
);
4397 if (hcon
->type
!= ACL_LINK
)
4400 l2cap_conn_del(hcon
, bt_err(reason
));
4405 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4407 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4410 if (encrypt
== 0x00) {
4411 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4412 l2cap_sock_clear_timer(sk
);
4413 l2cap_sock_set_timer(sk
, HZ
* 5);
4414 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4415 __l2cap_sock_close(sk
, ECONNREFUSED
);
4417 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4418 l2cap_sock_clear_timer(sk
);
4422 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4424 struct l2cap_chan_list
*l
;
4425 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4431 l
= &conn
->chan_list
;
4433 BT_DBG("conn %p", conn
);
4435 read_lock(&l
->lock
);
4437 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4440 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4445 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4446 sk
->sk_state
== BT_CONFIG
)) {
4447 l2cap_check_encryption(sk
, encrypt
);
4452 if (sk
->sk_state
== BT_CONNECT
) {
4454 struct l2cap_conn_req req
;
4455 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4456 req
.psm
= l2cap_pi(sk
)->psm
;
4458 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4459 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4461 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4462 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4464 l2cap_sock_clear_timer(sk
);
4465 l2cap_sock_set_timer(sk
, HZ
/ 10);
4467 } else if (sk
->sk_state
== BT_CONNECT2
) {
4468 struct l2cap_conn_rsp rsp
;
4472 sk
->sk_state
= BT_CONFIG
;
4473 result
= L2CAP_CR_SUCCESS
;
4475 sk
->sk_state
= BT_DISCONN
;
4476 l2cap_sock_set_timer(sk
, HZ
/ 10);
4477 result
= L2CAP_CR_SEC_BLOCK
;
4480 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4481 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4482 rsp
.result
= cpu_to_le16(result
);
4483 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4484 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4485 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4491 read_unlock(&l
->lock
);
4496 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4498 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4500 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4503 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4505 if (flags
& ACL_START
) {
4506 struct l2cap_hdr
*hdr
;
4510 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4511 kfree_skb(conn
->rx_skb
);
4512 conn
->rx_skb
= NULL
;
4514 l2cap_conn_unreliable(conn
, ECOMM
);
4518 BT_ERR("Frame is too short (len %d)", skb
->len
);
4519 l2cap_conn_unreliable(conn
, ECOMM
);
4523 hdr
= (struct l2cap_hdr
*) skb
->data
;
4524 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4526 if (len
== skb
->len
) {
4527 /* Complete frame received */
4528 l2cap_recv_frame(conn
, skb
);
4532 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4534 if (skb
->len
> len
) {
4535 BT_ERR("Frame is too long (len %d, expected len %d)",
4537 l2cap_conn_unreliable(conn
, ECOMM
);
4541 /* Allocate skb for the complete frame (with header) */
4542 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4546 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4548 conn
->rx_len
= len
- skb
->len
;
4550 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4552 if (!conn
->rx_len
) {
4553 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4554 l2cap_conn_unreliable(conn
, ECOMM
);
4558 if (skb
->len
> conn
->rx_len
) {
4559 BT_ERR("Fragment is too long (len %d, expected %d)",
4560 skb
->len
, conn
->rx_len
);
4561 kfree_skb(conn
->rx_skb
);
4562 conn
->rx_skb
= NULL
;
4564 l2cap_conn_unreliable(conn
, ECOMM
);
4568 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4570 conn
->rx_len
-= skb
->len
;
4572 if (!conn
->rx_len
) {
4573 /* Complete frame received */
4574 l2cap_recv_frame(conn
, conn
->rx_skb
);
4575 conn
->rx_skb
= NULL
;
4584 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4587 struct hlist_node
*node
;
4589 read_lock_bh(&l2cap_sk_list
.lock
);
4591 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4592 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4594 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4595 batostr(&bt_sk(sk
)->src
),
4596 batostr(&bt_sk(sk
)->dst
),
4597 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4599 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4602 read_unlock_bh(&l2cap_sk_list
.lock
);
4607 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4609 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4612 static const struct file_operations l2cap_debugfs_fops
= {
4613 .open
= l2cap_debugfs_open
,
4615 .llseek
= seq_lseek
,
4616 .release
= single_release
,
/* dentry of the debugfs entry; NULL if creation failed or debugfs is off. */
static struct dentry *l2cap_debugfs;
4621 static const struct proto_ops l2cap_sock_ops
= {
4622 .family
= PF_BLUETOOTH
,
4623 .owner
= THIS_MODULE
,
4624 .release
= l2cap_sock_release
,
4625 .bind
= l2cap_sock_bind
,
4626 .connect
= l2cap_sock_connect
,
4627 .listen
= l2cap_sock_listen
,
4628 .accept
= l2cap_sock_accept
,
4629 .getname
= l2cap_sock_getname
,
4630 .sendmsg
= l2cap_sock_sendmsg
,
4631 .recvmsg
= l2cap_sock_recvmsg
,
4632 .poll
= bt_sock_poll
,
4633 .ioctl
= bt_sock_ioctl
,
4634 .mmap
= sock_no_mmap
,
4635 .socketpair
= sock_no_socketpair
,
4636 .shutdown
= l2cap_sock_shutdown
,
4637 .setsockopt
= l2cap_sock_setsockopt
,
4638 .getsockopt
= l2cap_sock_getsockopt
4641 static const struct net_proto_family l2cap_sock_family_ops
= {
4642 .family
= PF_BLUETOOTH
,
4643 .owner
= THIS_MODULE
,
4644 .create
= l2cap_sock_create
,
4647 static struct hci_proto l2cap_hci_proto
= {
4649 .id
= HCI_PROTO_L2CAP
,
4650 .connect_ind
= l2cap_connect_ind
,
4651 .connect_cfm
= l2cap_connect_cfm
,
4652 .disconn_ind
= l2cap_disconn_ind
,
4653 .disconn_cfm
= l2cap_disconn_cfm
,
4654 .security_cfm
= l2cap_security_cfm
,
4655 .recv_acldata
= l2cap_recv_acldata
4658 static int __init
l2cap_init(void)
4662 err
= proto_register(&l2cap_proto
, 0);
4666 _busy_wq
= create_singlethread_workqueue("l2cap");
4670 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4672 BT_ERR("L2CAP socket registration failed");
4676 err
= hci_register_proto(&l2cap_hci_proto
);
4678 BT_ERR("L2CAP protocol registration failed");
4679 bt_sock_unregister(BTPROTO_L2CAP
);
4684 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4685 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4687 BT_ERR("Failed to create L2CAP debug file");
4690 BT_INFO("L2CAP ver %s", VERSION
);
4691 BT_INFO("L2CAP socket layer initialized");
4696 proto_unregister(&l2cap_proto
);
4700 static void __exit
l2cap_exit(void)
4702 debugfs_remove(l2cap_debugfs
);
4704 flush_workqueue(_busy_wq
);
4705 destroy_workqueue(_busy_wq
);
4707 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4708 BT_ERR("L2CAP socket unregistration failed");
4710 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4711 BT_ERR("L2CAP protocol unregistration failed");
4713 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4724 module_init(l2cap_init
);
4725 module_exit(l2cap_exit
);
4727 module_param(enable_ertm
, bool, 0644);
4728 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4730 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4731 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4732 MODULE_VERSION(VERSION
);
4733 MODULE_LICENSE("GPL");
4734 MODULE_ALIAS("bt-proto-0");