1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87 }
88
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101  * Returns the channel; conn->chan_lock is held only during the lookup. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 struct l2cap_chan *c;
105
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 mutex_unlock(&conn->chan_lock);
109
110 return c;
111 }
112
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 {
115 struct l2cap_chan *c;
116
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
119 return c;
120 }
121 return NULL;
122 }
123
124 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 {
126 struct l2cap_chan *c;
127
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_ident(conn, ident);
130 mutex_unlock(&conn->chan_lock);
131
132 return c;
133 }
134
135 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 {
137 struct l2cap_chan *c;
138
139 list_for_each_entry(c, &chan_list, global_l) {
140 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
141 return c;
142 }
143 return NULL;
144 }
145
146 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
147 {
148 int err;
149
150 write_lock(&chan_list_lock);
151
152 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
153 err = -EADDRINUSE;
154 goto done;
155 }
156
157 if (psm) {
158 chan->psm = psm;
159 chan->sport = psm;
160 err = 0;
161 } else {
162 u16 p;
163
164 err = -EINVAL;
165 for (p = 0x1001; p < 0x1100; p += 2)
166 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
167 chan->psm = cpu_to_le16(p);
168 chan->sport = cpu_to_le16(p);
169 err = 0;
170 break;
171 }
172 }
173
174 done:
175 write_unlock(&chan_list_lock);
176 return err;
177 }
178
179 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 {
181 write_lock(&chan_list_lock);
182
183 chan->scid = scid;
184
185 write_unlock(&chan_list_lock);
186
187 return 0;
188 }
189
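/* Allocate the first unused dynamic CID on this connection,
 * or 0 if the dynamic range is exhausted.
 */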
190 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 {
192 u16 cid = L2CAP_CID_DYN_START;
193
194 for (; cid < L2CAP_CID_DYN_END; cid++) {
195 if (!__l2cap_get_chan_by_scid(conn, cid))
196 return cid;
197 }
198
199 return 0;
200 }
201
202 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 {
204 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
205 state_to_string(state));
206
207 chan->state = state;
208 chan->ops->state_change(chan->data, state);
209 }
210
211 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 {
213 struct sock *sk = chan->sk;
214
215 lock_sock(sk);
216 __l2cap_state_change(chan, state);
217 release_sock(sk);
218 }
219
220 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 {
222 struct sock *sk = chan->sk;
223
224 sk->sk_err = err;
225 }
226
227 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 {
229 struct sock *sk = chan->sk;
230
231 lock_sock(sk);
232 __l2cap_chan_set_err(chan, err);
233 release_sock(sk);
234 }
235
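/* Channel timer expired: close the channel with a reason derived
 * from its state, then drop the timer's reference on it.
 */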
236 static void l2cap_chan_timeout(struct work_struct *work)
237 {
238 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
239 chan_timer.work);
240 struct l2cap_conn *conn = chan->conn;
241 int reason;
242
243 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
244
245 mutex_lock(&conn->chan_lock);
246 l2cap_chan_lock(chan);
247
248 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
249 reason = ECONNREFUSED;
250 else if (chan->state == BT_CONNECT &&
251 chan->sec_level != BT_SECURITY_SDP)
252 reason = ECONNREFUSED;
253 else
254 reason = ETIMEDOUT;
255
256 l2cap_chan_close(chan, reason);
257
258 l2cap_chan_unlock(chan);
259
260 chan->ops->close(chan->data);
261 mutex_unlock(&conn->chan_lock);
262
263 l2cap_chan_put(chan);
264 }
265
266 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
267 {
268 struct l2cap_chan *chan;
269
270 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
271 if (!chan)
272 return NULL;
273
274 mutex_init(&chan->lock);
275
276 chan->sk = sk;
277
278 write_lock(&chan_list_lock);
279 list_add(&chan->global_l, &chan_list);
280 write_unlock(&chan_list_lock);
281
282 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
283
284 chan->state = BT_OPEN;
285
286 atomic_set(&chan->refcnt, 1);
287
288 BT_DBG("sk %p chan %p", sk, chan);
289
290 return chan;
291 }
292
293 void l2cap_chan_destroy(struct l2cap_chan *chan)
294 {
295 write_lock(&chan_list_lock);
296 list_del(&chan->global_l);
297 write_unlock(&chan_list_lock);
298
299 l2cap_chan_put(chan);
300 }
301
302 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
303 {
304 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
305 chan->psm, chan->dcid);
306
307 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
308
309 chan->conn = conn;
310
311 switch (chan->chan_type) {
312 case L2CAP_CHAN_CONN_ORIENTED:
313 if (conn->hcon->type == LE_LINK) {
314 /* LE connection */
315 chan->omtu = L2CAP_LE_DEFAULT_MTU;
316 chan->scid = L2CAP_CID_LE_DATA;
317 chan->dcid = L2CAP_CID_LE_DATA;
318 } else {
319 /* Alloc CID for connection-oriented socket */
320 chan->scid = l2cap_alloc_cid(conn);
321 chan->omtu = L2CAP_DEFAULT_MTU;
322 }
323 break;
324
325 case L2CAP_CHAN_CONN_LESS:
326 /* Connectionless socket */
327 chan->scid = L2CAP_CID_CONN_LESS;
328 chan->dcid = L2CAP_CID_CONN_LESS;
329 chan->omtu = L2CAP_DEFAULT_MTU;
330 break;
331
332 default:
333 /* Raw socket can send/recv signalling messages only */
334 chan->scid = L2CAP_CID_SIGNALING;
335 chan->dcid = L2CAP_CID_SIGNALING;
336 chan->omtu = L2CAP_DEFAULT_MTU;
337 }
338
339 chan->local_id = L2CAP_BESTEFFORT_ID;
340 chan->local_stype = L2CAP_SERV_BESTEFFORT;
341 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
342 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
343 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
344 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
345
346 l2cap_chan_hold(chan);
347
348 list_add(&chan->list, &conn->chan_l);
349 }
350
351 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
352 {
353 mutex_lock(&conn->chan_lock);
354 __l2cap_chan_add(conn, chan);
355 mutex_unlock(&conn->chan_lock);
356 }
357
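/* Detach a channel from its connection, mark the socket closed and
 * zapped, and flush any pending ERTM state.
 */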
358 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 {
360 struct sock *sk = chan->sk;
361 struct l2cap_conn *conn = chan->conn;
362 struct sock *parent = bt_sk(sk)->parent;
363
364 __clear_chan_timer(chan);
365
366 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
367
368 if (conn) {
369 /* Delete from channel list */
370 list_del(&chan->list);
371
372 l2cap_chan_put(chan);
373
374 chan->conn = NULL;
375 hci_conn_put(conn->hcon);
376 }
377
378 lock_sock(sk);
379
380 __l2cap_state_change(chan, BT_CLOSED);
381 sock_set_flag(sk, SOCK_ZAPPED);
382
383 if (err)
384 __l2cap_chan_set_err(chan, err);
385
386 if (parent) {
387 bt_accept_unlink(sk);
388 parent->sk_data_ready(parent, 0);
389 } else
390 sk->sk_state_change(sk);
391
392 release_sock(sk);
393
394 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
395 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
396 return;
397
398 skb_queue_purge(&chan->tx_q);
399
400 if (chan->mode == L2CAP_MODE_ERTM) {
401 struct srej_list *l, *tmp;
402
403 __clear_retrans_timer(chan);
404 __clear_monitor_timer(chan);
405 __clear_ack_timer(chan);
406
407 skb_queue_purge(&chan->srej_q);
408
409 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
410 list_del(&l->list);
411 kfree(l);
412 }
413 }
414 }
415
416 static void l2cap_chan_cleanup_listen(struct sock *parent)
417 {
418 struct sock *sk;
419
420 BT_DBG("parent %p", parent);
421
422 /* Close channels that have not yet been accepted */
423 while ((sk = bt_accept_dequeue(parent, NULL))) {
424 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425
426 l2cap_chan_lock(chan);
427 __clear_chan_timer(chan);
428 l2cap_chan_close(chan, ECONNRESET);
429 l2cap_chan_unlock(chan);
430
431 chan->ops->close(chan->data);
432 }
433 }
434
435 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 {
437 struct l2cap_conn *conn = chan->conn;
438 struct sock *sk = chan->sk;
439
440 BT_DBG("chan %p state %s sk %p", chan,
441 state_to_string(chan->state), sk);
442
443 switch (chan->state) {
444 case BT_LISTEN:
445 lock_sock(sk);
446 l2cap_chan_cleanup_listen(sk);
447
448 __l2cap_state_change(chan, BT_CLOSED);
449 sock_set_flag(sk, SOCK_ZAPPED);
450 release_sock(sk);
451 break;
452
453 case BT_CONNECTED:
454 case BT_CONFIG:
455 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
456 conn->hcon->type == ACL_LINK) {
457 __clear_chan_timer(chan);
458 __set_chan_timer(chan, sk->sk_sndtimeo);
459 l2cap_send_disconn_req(conn, chan, reason);
460 } else
461 l2cap_chan_del(chan, reason);
462 break;
463
464 case BT_CONNECT2:
465 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
466 conn->hcon->type == ACL_LINK) {
467 struct l2cap_conn_rsp rsp;
468 __u16 result;
469
470 if (bt_sk(sk)->defer_setup)
471 result = L2CAP_CR_SEC_BLOCK;
472 else
473 result = L2CAP_CR_BAD_PSM;
474 l2cap_state_change(chan, BT_DISCONN);
475
476 rsp.scid = cpu_to_le16(chan->dcid);
477 rsp.dcid = cpu_to_le16(chan->scid);
478 rsp.result = cpu_to_le16(result);
479 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
480 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
481 sizeof(rsp), &rsp);
482 }
483
484 l2cap_chan_del(chan, reason);
485 break;
486
487 case BT_CONNECT:
488 case BT_DISCONN:
489 l2cap_chan_del(chan, reason);
490 break;
491
492 default:
493 lock_sock(sk);
494 sock_set_flag(sk, SOCK_ZAPPED);
495 release_sock(sk);
496 break;
497 }
498 }
499
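/* Map channel type, PSM and security level to the HCI authentication
 * requirement used when securing the link.
 */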
500 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
501 {
502 if (chan->chan_type == L2CAP_CHAN_RAW) {
503 switch (chan->sec_level) {
504 case BT_SECURITY_HIGH:
505 return HCI_AT_DEDICATED_BONDING_MITM;
506 case BT_SECURITY_MEDIUM:
507 return HCI_AT_DEDICATED_BONDING;
508 default:
509 return HCI_AT_NO_BONDING;
510 }
511 } else if (chan->psm == cpu_to_le16(0x0001)) {
512 if (chan->sec_level == BT_SECURITY_LOW)
513 chan->sec_level = BT_SECURITY_SDP;
514
515 if (chan->sec_level == BT_SECURITY_HIGH)
516 return HCI_AT_NO_BONDING_MITM;
517 else
518 return HCI_AT_NO_BONDING;
519 } else {
520 switch (chan->sec_level) {
521 case BT_SECURITY_HIGH:
522 return HCI_AT_GENERAL_BONDING_MITM;
523 case BT_SECURITY_MEDIUM:
524 return HCI_AT_GENERAL_BONDING;
525 default:
526 return HCI_AT_NO_BONDING;
527 }
528 }
529 }
530
531 /* Service level security */
532 int l2cap_chan_check_security(struct l2cap_chan *chan)
533 {
534 struct l2cap_conn *conn = chan->conn;
535 __u8 auth_type;
536
537 auth_type = l2cap_get_auth_type(chan);
538
539 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
540 }
541
542 static u8 l2cap_get_ident(struct l2cap_conn *conn)
543 {
544 u8 id;
545
546 /* Get next available identifier.
547 * 1 - 128 are used by the kernel.
548 * 129 - 199 are reserved.
549 * 200 - 254 are used by utilities like l2ping, etc.
550 */
551
552 spin_lock(&conn->lock);
553
554 if (++conn->tx_ident > 128)
555 conn->tx_ident = 1;
556
557 id = conn->tx_ident;
558
559 spin_unlock(&conn->lock);
560
561 return id;
562 }
563
564 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
565 {
566 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
567 u8 flags;
568
569 BT_DBG("code 0x%2.2x", code);
570
571 if (!skb)
572 return;
573
574 if (lmp_no_flush_capable(conn->hcon->hdev))
575 flags = ACL_START_NO_FLUSH;
576 else
577 flags = ACL_START;
578
579 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
580 skb->priority = HCI_PRIO_MAX;
581
582 hci_send_acl(conn->hchan, skb, flags);
583 }
584
585 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
586 {
587 struct hci_conn *hcon = chan->conn->hcon;
588 u16 flags;
589
590 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
591 skb->priority);
592
593 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
594 lmp_no_flush_capable(hcon->hdev))
595 flags = ACL_START_NO_FLUSH;
596 else
597 flags = ACL_START;
598
599 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
600 hci_send_acl(chan->conn->hchan, skb, flags);
601 }
602
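/* Build and send a supervisory (S) frame carrying the given control
 * field, adding the Final/Poll bits and FCS as required.
 */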
603 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
604 {
605 struct sk_buff *skb;
606 struct l2cap_hdr *lh;
607 struct l2cap_conn *conn = chan->conn;
608 int count, hlen;
609
610 if (chan->state != BT_CONNECTED)
611 return;
612
613 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
614 hlen = L2CAP_EXT_HDR_SIZE;
615 else
616 hlen = L2CAP_ENH_HDR_SIZE;
617
618 if (chan->fcs == L2CAP_FCS_CRC16)
619 hlen += L2CAP_FCS_SIZE;
620
621 BT_DBG("chan %p, control 0x%8.8x", chan, control);
622
623 count = min_t(unsigned int, conn->mtu, hlen);
624
625 control |= __set_sframe(chan);
626
627 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
628 control |= __set_ctrl_final(chan);
629
630 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
631 control |= __set_ctrl_poll(chan);
632
633 skb = bt_skb_alloc(count, GFP_ATOMIC);
634 if (!skb)
635 return;
636
637 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
638 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
639 lh->cid = cpu_to_le16(chan->dcid);
640
641 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
642
643 if (chan->fcs == L2CAP_FCS_CRC16) {
644 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
645 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
646 }
647
648 skb->priority = HCI_PRIO_MAX;
649 l2cap_do_send(chan, skb);
650 }
651
652 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
653 {
654 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
656 set_bit(CONN_RNR_SENT, &chan->conn_state);
657 } else
658 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
659
660 control |= __set_reqseq(chan, chan->buffer_seq);
661
662 l2cap_send_sframe(chan, control);
663 }
664
665 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
666 {
667 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
668 }
669
670 static void l2cap_send_conn_req(struct l2cap_chan *chan)
671 {
672 struct l2cap_conn *conn = chan->conn;
673 struct l2cap_conn_req req;
674
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
677
678 chan->ident = l2cap_get_ident(conn);
679
680 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681
682 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
683 }
684
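/* Start channel setup: query the remote feature mask first if needed,
 * then send a Connection Request once security is satisfied.
 */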
685 static void l2cap_do_start(struct l2cap_chan *chan)
686 {
687 struct l2cap_conn *conn = chan->conn;
688
689 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
690 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
691 return;
692
693 if (l2cap_chan_check_security(chan) &&
694 __l2cap_no_conn_pending(chan))
695 l2cap_send_conn_req(chan);
696 } else {
697 struct l2cap_info_req req;
698 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
699
700 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
701 conn->info_ident = l2cap_get_ident(conn);
702
703 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
704
705 l2cap_send_cmd(conn, conn->info_ident,
706 L2CAP_INFO_REQ, sizeof(req), &req);
707 }
708 }
709
710 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
711 {
712 u32 local_feat_mask = l2cap_feat_mask;
713 if (!disable_ertm)
714 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
715
716 switch (mode) {
717 case L2CAP_MODE_ERTM:
718 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
719 case L2CAP_MODE_STREAMING:
720 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
721 default:
722 return 0x00;
723 }
724 }
725
726 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
727 {
728 struct sock *sk = chan->sk;
729 struct l2cap_disconn_req req;
730
731 if (!conn)
732 return;
733
734 if (chan->mode == L2CAP_MODE_ERTM) {
735 __clear_retrans_timer(chan);
736 __clear_monitor_timer(chan);
737 __clear_ack_timer(chan);
738 }
739
740 req.dcid = cpu_to_le16(chan->dcid);
741 req.scid = cpu_to_le16(chan->scid);
742 l2cap_send_cmd(conn, l2cap_get_ident(conn),
743 L2CAP_DISCONN_REQ, sizeof(req), &req);
744
745 lock_sock(sk);
746 __l2cap_state_change(chan, BT_DISCONN);
747 __l2cap_chan_set_err(chan, err);
748 release_sock(sk);
749 }
750
751 /* ---- L2CAP connections ---- */
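/* Walk every channel on the connection and advance its setup: send
 * Connection Requests for channels in BT_CONNECT and Connection
 * Responses (plus an initial Configure Request) for BT_CONNECT2.
 */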
752 static void l2cap_conn_start(struct l2cap_conn *conn)
753 {
754 struct l2cap_chan *chan, *tmp;
755
756 BT_DBG("conn %p", conn);
757
758 mutex_lock(&conn->chan_lock);
759
760 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
761 struct sock *sk = chan->sk;
762
763 l2cap_chan_lock(chan);
764
765 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
766 l2cap_chan_unlock(chan);
767 continue;
768 }
769
770 if (chan->state == BT_CONNECT) {
771 if (!l2cap_chan_check_security(chan) ||
772 !__l2cap_no_conn_pending(chan)) {
773 l2cap_chan_unlock(chan);
774 continue;
775 }
776
777 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
778 && test_bit(CONF_STATE2_DEVICE,
779 &chan->conf_state)) {
780 l2cap_chan_close(chan, ECONNRESET);
781 l2cap_chan_unlock(chan);
782 continue;
783 }
784
785 l2cap_send_conn_req(chan);
786
787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp;
789 char buf[128];
790 rsp.scid = cpu_to_le16(chan->dcid);
791 rsp.dcid = cpu_to_le16(chan->scid);
792
793 if (l2cap_chan_check_security(chan)) {
794 lock_sock(sk);
795 if (bt_sk(sk)->defer_setup) {
796 struct sock *parent = bt_sk(sk)->parent;
797 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
798 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
799 if (parent)
800 parent->sk_data_ready(parent, 0);
801
802 } else {
803 __l2cap_state_change(chan, BT_CONFIG);
804 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
805 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
806 }
807 release_sock(sk);
808 } else {
809 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
810 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
811 }
812
813 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
814 sizeof(rsp), &rsp);
815
816 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
817 rsp.result != L2CAP_CR_SUCCESS) {
818 l2cap_chan_unlock(chan);
819 continue;
820 }
821
822 set_bit(CONF_REQ_SENT, &chan->conf_state);
823 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
824 l2cap_build_conf_req(chan, buf), buf);
825 chan->num_conf_req++;
826 }
827
828 l2cap_chan_unlock(chan);
829 }
830
831 mutex_unlock(&conn->chan_lock);
832 }
833
834 /* Find channel with given CID and source bdaddr.
835 * Returns the closest match (a BDADDR_ANY source acts as a wildcard).
836 */
837 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
838 {
839 struct l2cap_chan *c, *c1 = NULL;
840
841 read_lock(&chan_list_lock);
842
843 list_for_each_entry(c, &chan_list, global_l) {
844 struct sock *sk = c->sk;
845
846 if (state && c->state != state)
847 continue;
848
849 if (c->scid == cid) {
850 /* Exact match. */
851 if (!bacmp(&bt_sk(sk)->src, src)) {
852 read_unlock(&chan_list_lock);
853 return c;
854 }
855
856 /* Closest match */
857 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
858 c1 = c;
859 }
860 }
861
862 read_unlock(&chan_list_lock);
863
864 return c1;
865 }
866
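/* Incoming LE link: clone a channel from the socket listening on the
 * LE data CID, attach it to the connection and mark it connected.
 */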
867 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
868 {
869 struct sock *parent, *sk;
870 struct l2cap_chan *chan, *pchan;
871
872 BT_DBG("");
873
874 /* Check if we have a socket listening on this CID */
875 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
876 conn->src);
877 if (!pchan)
878 return;
879
880 parent = pchan->sk;
881
882 lock_sock(parent);
883
884 /* Check for backlog size */
885 if (sk_acceptq_is_full(parent)) {
886 BT_DBG("backlog full %d", parent->sk_ack_backlog);
887 goto clean;
888 }
889
890 chan = pchan->ops->new_connection(pchan->data);
891 if (!chan)
892 goto clean;
893
894 sk = chan->sk;
895
896 hci_conn_hold(conn->hcon);
897
898 bacpy(&bt_sk(sk)->src, conn->src);
899 bacpy(&bt_sk(sk)->dst, conn->dst);
900
901 bt_accept_enqueue(parent, sk);
902
903 l2cap_chan_add(conn, chan);
904
905 __set_chan_timer(chan, sk->sk_sndtimeo);
906
907 __l2cap_state_change(chan, BT_CONNECTED);
908 parent->sk_data_ready(parent, 0);
909
910 clean:
911 release_sock(parent);
912 }
913
914 static void l2cap_chan_ready(struct l2cap_chan *chan)
915 {
916 struct sock *sk = chan->sk;
917 struct sock *parent;
918
919 lock_sock(sk);
920
921 parent = bt_sk(sk)->parent;
922
923 BT_DBG("sk %p, parent %p", sk, parent);
924
925 chan->conf_state = 0;
926 __clear_chan_timer(chan);
927
928 __l2cap_state_change(chan, BT_CONNECTED);
929 sk->sk_state_change(sk);
930
931 if (parent)
932 parent->sk_data_ready(parent, 0);
933
934 release_sock(sk);
935 }
936
937 static void l2cap_conn_ready(struct l2cap_conn *conn)
938 {
939 struct l2cap_chan *chan;
940
941 BT_DBG("conn %p", conn);
942
943 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
944 l2cap_le_conn_ready(conn);
945
946 if (conn->hcon->out && conn->hcon->type == LE_LINK)
947 smp_conn_security(conn, conn->hcon->pending_sec_level);
948
949 mutex_lock(&conn->chan_lock);
950
951 list_for_each_entry(chan, &conn->chan_l, list) {
952
953 l2cap_chan_lock(chan);
954
955 if (conn->hcon->type == LE_LINK) {
956 if (smp_conn_security(conn, chan->sec_level))
957 l2cap_chan_ready(chan);
958
959 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
960 struct sock *sk = chan->sk;
961 __clear_chan_timer(chan);
962 lock_sock(sk);
963 __l2cap_state_change(chan, BT_CONNECTED);
964 sk->sk_state_change(sk);
965 release_sock(sk);
966
967 } else if (chan->state == BT_CONNECT)
968 l2cap_do_start(chan);
969
970 l2cap_chan_unlock(chan);
971 }
972
973 mutex_unlock(&conn->chan_lock);
974 }
975
976 /* Notify sockets that we can no longer guarantee reliability */
977 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
978 {
979 struct l2cap_chan *chan;
980
981 BT_DBG("conn %p", conn);
982
983 mutex_lock(&conn->chan_lock);
984
985 list_for_each_entry(chan, &conn->chan_l, list) {
986 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
987 __l2cap_chan_set_err(chan, err);
988 }
989
990 mutex_unlock(&conn->chan_lock);
991 }
992
993 static void l2cap_info_timeout(struct work_struct *work)
994 {
995 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
996 info_timer.work);
997
998 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
999 conn->info_ident = 0;
1000
1001 l2cap_conn_start(conn);
1002 }
1003
1004 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1005 {
1006 struct l2cap_conn *conn = hcon->l2cap_data;
1007 struct l2cap_chan *chan, *l;
1008
1009 if (!conn)
1010 return;
1011
1012 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1013
1014 kfree_skb(conn->rx_skb);
1015
1016 mutex_lock(&conn->chan_lock);
1017
1018 /* Kill channels */
1019 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1020 l2cap_chan_lock(chan);
1021
1022 l2cap_chan_del(chan, err);
1023
1024 l2cap_chan_unlock(chan);
1025
1026 chan->ops->close(chan->data);
1027 }
1028
1029 mutex_unlock(&conn->chan_lock);
1030
1031 hci_chan_del(conn->hchan);
1032
1033 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1034 cancel_delayed_work_sync(&conn->info_timer);
1035
1036 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1037 cancel_delayed_work_sync(&conn->security_timer);
1038 smp_chan_destroy(conn);
1039 }
1040
1041 hcon->l2cap_data = NULL;
1042 kfree(conn);
1043 }
1044
1045 static void security_timeout(struct work_struct *work)
1046 {
1047 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1048 security_timer.work);
1049
1050 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1051 }
1052
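/* Allocate the l2cap_conn for an HCI connection: create its HCI
 * channel, pick the MTU from the link type and arm the info or
 * security timer.
 */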
1053 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1054 {
1055 struct l2cap_conn *conn = hcon->l2cap_data;
1056 struct hci_chan *hchan;
1057
1058 if (conn || status)
1059 return conn;
1060
1061 hchan = hci_chan_create(hcon);
1062 if (!hchan)
1063 return NULL;
1064
1065 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1066 if (!conn) {
1067 hci_chan_del(hchan);
1068 return NULL;
1069 }
1070
1071 hcon->l2cap_data = conn;
1072 conn->hcon = hcon;
1073 conn->hchan = hchan;
1074
1075 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1076
1077 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1078 conn->mtu = hcon->hdev->le_mtu;
1079 else
1080 conn->mtu = hcon->hdev->acl_mtu;
1081
1082 conn->src = &hcon->hdev->bdaddr;
1083 conn->dst = &hcon->dst;
1084
1085 conn->feat_mask = 0;
1086
1087 spin_lock_init(&conn->lock);
1088 mutex_init(&conn->chan_lock);
1089
1090 INIT_LIST_HEAD(&conn->chan_l);
1091
1092 if (hcon->type == LE_LINK)
1093 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1094 else
1095 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1096
1097 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1098
1099 return conn;
1100 }
1101
1102 /* ---- Socket interface ---- */
1103
1104 /* Find channel with given PSM and source bdaddr.
1105 * Returns closest match.
1106 */
1107 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1108 {
1109 struct l2cap_chan *c, *c1 = NULL;
1110
1111 read_lock(&chan_list_lock);
1112
1113 list_for_each_entry(c, &chan_list, global_l) {
1114 struct sock *sk = c->sk;
1115
1116 if (state && c->state != state)
1117 continue;
1118
1119 if (c->psm == psm) {
1120 /* Exact match. */
1121 if (!bacmp(&bt_sk(sk)->src, src)) {
1122 read_unlock(&chan_list_lock);
1123 return c;
1124 }
1125
1126 /* Closest match */
1127 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1128 c1 = c;
1129 }
1130 }
1131
1132 read_unlock(&chan_list_lock);
1133
1134 return c1;
1135 }
1136
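/* Initiate an outgoing connection: validate PSM/CID and mode, create
 * or reuse the ACL/LE link and kick off channel setup.
 */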
1137 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1138 {
1139 struct sock *sk = chan->sk;
1140 bdaddr_t *src = &bt_sk(sk)->src;
1141 struct l2cap_conn *conn;
1142 struct hci_conn *hcon;
1143 struct hci_dev *hdev;
1144 __u8 auth_type;
1145 int err;
1146
1147 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1148 chan->psm);
1149
1150 hdev = hci_get_route(dst, src);
1151 if (!hdev)
1152 return -EHOSTUNREACH;
1153
1154 hci_dev_lock(hdev);
1155
1156 l2cap_chan_lock(chan);
1157
1158 /* PSM must be odd and the LSB of the upper byte must be 0 */
1159 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1160 chan->chan_type != L2CAP_CHAN_RAW) {
1161 err = -EINVAL;
1162 goto done;
1163 }
1164
1165 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1166 err = -EINVAL;
1167 goto done;
1168 }
1169
1170 switch (chan->mode) {
1171 case L2CAP_MODE_BASIC:
1172 break;
1173 case L2CAP_MODE_ERTM:
1174 case L2CAP_MODE_STREAMING:
1175 if (!disable_ertm)
1176 break;
1177 /* fall through */
1178 default:
1179 err = -ENOTSUPP;
1180 goto done;
1181 }
1182
1183 lock_sock(sk);
1184
1185 switch (sk->sk_state) {
1186 case BT_CONNECT:
1187 case BT_CONNECT2:
1188 case BT_CONFIG:
1189 /* Already connecting */
1190 err = 0;
1191 release_sock(sk);
1192 goto done;
1193
1194 case BT_CONNECTED:
1195 /* Already connected */
1196 err = -EISCONN;
1197 release_sock(sk);
1198 goto done;
1199
1200 case BT_OPEN:
1201 case BT_BOUND:
1202 /* Can connect */
1203 break;
1204
1205 default:
1206 err = -EBADFD;
1207 release_sock(sk);
1208 goto done;
1209 }
1210
1211 /* Set destination address and psm */
1212 bacpy(&bt_sk(sk)->dst, dst);
1213
1214 release_sock(sk);
1215
1216 chan->psm = psm;
1217 chan->dcid = cid;
1218
1219 auth_type = l2cap_get_auth_type(chan);
1220
1221 if (chan->dcid == L2CAP_CID_LE_DATA)
1222 hcon = hci_connect(hdev, LE_LINK, dst,
1223 chan->sec_level, auth_type);
1224 else
1225 hcon = hci_connect(hdev, ACL_LINK, dst,
1226 chan->sec_level, auth_type);
1227
1228 if (IS_ERR(hcon)) {
1229 err = PTR_ERR(hcon);
1230 goto done;
1231 }
1232
1233 conn = l2cap_conn_add(hcon, 0);
1234 if (!conn) {
1235 hci_conn_put(hcon);
1236 err = -ENOMEM;
1237 goto done;
1238 }
1239
1240 /* Update source addr of the socket */
1241 bacpy(src, conn->src);
1242
1243 l2cap_chan_unlock(chan);
1244 l2cap_chan_add(conn, chan);
1245 l2cap_chan_lock(chan);
1246
1247 l2cap_state_change(chan, BT_CONNECT);
1248 __set_chan_timer(chan, sk->sk_sndtimeo);
1249
1250 if (hcon->state == BT_CONNECTED) {
1251 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1252 __clear_chan_timer(chan);
1253 if (l2cap_chan_check_security(chan))
1254 l2cap_state_change(chan, BT_CONNECTED);
1255 } else
1256 l2cap_do_start(chan);
1257 }
1258
1259 err = 0;
1260
1261 done:
1262 l2cap_chan_unlock(chan);
1263 hci_dev_unlock(hdev);
1264 hci_dev_put(hdev);
1265 return err;
1266 }
1267
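/* Wait until all outstanding ERTM frames have been acknowledged or
 * the socket reports an error.
 */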
1268 int __l2cap_wait_ack(struct sock *sk)
1269 {
1270 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1271 DECLARE_WAITQUEUE(wait, current);
1272 int err = 0;
1273 int timeo = HZ/5;
1274
1275 add_wait_queue(sk_sleep(sk), &wait);
1276 set_current_state(TASK_INTERRUPTIBLE);
1277 while (chan->unacked_frames > 0 && chan->conn) {
1278 if (!timeo)
1279 timeo = HZ/5;
1280
1281 if (signal_pending(current)) {
1282 err = sock_intr_errno(timeo);
1283 break;
1284 }
1285
1286 release_sock(sk);
1287 timeo = schedule_timeout(timeo);
1288 lock_sock(sk);
1289 set_current_state(TASK_INTERRUPTIBLE);
1290
1291 err = sock_error(sk);
1292 if (err)
1293 break;
1294 }
1295 set_current_state(TASK_RUNNING);
1296 remove_wait_queue(sk_sleep(sk), &wait);
1297 return err;
1298 }
1299
1300 static void l2cap_monitor_timeout(struct work_struct *work)
1301 {
1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 monitor_timer.work);
1304
1305 BT_DBG("chan %p", chan);
1306
1307 l2cap_chan_lock(chan);
1308
1309 if (chan->retry_count >= chan->remote_max_tx) {
1310 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1311 l2cap_chan_unlock(chan);
1312 return;
1313 }
1314
1315 chan->retry_count++;
1316 __set_monitor_timer(chan);
1317
1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1319 l2cap_chan_unlock(chan);
1320 }
1321
1322 static void l2cap_retrans_timeout(struct work_struct *work)
1323 {
1324 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1325 retrans_timer.work);
1326
1327 BT_DBG("chan %p", chan);
1328
1329 l2cap_chan_lock(chan);
1330
1331 chan->retry_count = 1;
1332 __set_monitor_timer(chan);
1333
1334 set_bit(CONN_WAIT_F, &chan->conn_state);
1335
1336 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1337
1338 l2cap_chan_unlock(chan);
1339 }
1340
1341 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1342 {
1343 struct sk_buff *skb;
1344
1345 while ((skb = skb_peek(&chan->tx_q)) &&
1346 chan->unacked_frames) {
1347 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1348 break;
1349
1350 skb = skb_dequeue(&chan->tx_q);
1351 kfree_skb(skb);
1352
1353 chan->unacked_frames--;
1354 }
1355
1356 if (!chan->unacked_frames)
1357 __clear_retrans_timer(chan);
1358 }
1359
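/* Streaming mode transmit: drain the tx queue, stamping each I-frame
 * with the next TxSeq (and FCS if enabled); nothing is kept for
 * retransmission.
 */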
1360 static void l2cap_streaming_send(struct l2cap_chan *chan)
1361 {
1362 struct sk_buff *skb;
1363 u32 control;
1364 u16 fcs;
1365
1366 while ((skb = skb_dequeue(&chan->tx_q))) {
1367 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1368 control |= __set_txseq(chan, chan->next_tx_seq);
1369 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1370
1371 if (chan->fcs == L2CAP_FCS_CRC16) {
1372 fcs = crc16(0, (u8 *)skb->data,
1373 skb->len - L2CAP_FCS_SIZE);
1374 put_unaligned_le16(fcs,
1375 skb->data + skb->len - L2CAP_FCS_SIZE);
1376 }
1377
1378 l2cap_do_send(chan, skb);
1379
1380 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1381 }
1382 }
1383
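/* Clone and resend the queued I-frame with the given TxSeq,
 * disconnecting instead if it already reached the remote
 * max-transmit limit.
 */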
1384 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1385 {
1386 struct sk_buff *skb, *tx_skb;
1387 u16 fcs;
1388 u32 control;
1389
1390 skb = skb_peek(&chan->tx_q);
1391 if (!skb)
1392 return;
1393
1394 while (bt_cb(skb)->tx_seq != tx_seq) {
1395 if (skb_queue_is_last(&chan->tx_q, skb))
1396 return;
1397
1398 skb = skb_queue_next(&chan->tx_q, skb);
1399 }
1400
1401 if (chan->remote_max_tx &&
1402 bt_cb(skb)->retries == chan->remote_max_tx) {
1403 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1404 return;
1405 }
1406
1407 tx_skb = skb_clone(skb, GFP_ATOMIC);
1408 bt_cb(skb)->retries++;
1409
1410 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1411 control &= __get_sar_mask(chan);
1412
1413 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1414 control |= __set_ctrl_final(chan);
1415
1416 control |= __set_reqseq(chan, chan->buffer_seq);
1417 control |= __set_txseq(chan, tx_seq);
1418
1419 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1420
1421 if (chan->fcs == L2CAP_FCS_CRC16) {
1422 fcs = crc16(0, (u8 *)tx_skb->data,
1423 tx_skb->len - L2CAP_FCS_SIZE);
1424 put_unaligned_le16(fcs,
1425 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1426 }
1427
1428 l2cap_do_send(chan, tx_skb);
1429 }
1430
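/* ERTM transmit: send queued I-frames while the transmit window
 * allows, cloning each skb so the original stays queued for
 * retransmission. Returns the number of frames newly sent.
 */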
1431 static int l2cap_ertm_send(struct l2cap_chan *chan)
1432 {
1433 struct sk_buff *skb, *tx_skb;
1434 u16 fcs;
1435 u32 control;
1436 int nsent = 0;
1437
1438 if (chan->state != BT_CONNECTED)
1439 return -ENOTCONN;
1440
1441 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1442
1443 if (chan->remote_max_tx &&
1444 bt_cb(skb)->retries == chan->remote_max_tx) {
1445 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1446 break;
1447 }
1448
1449 tx_skb = skb_clone(skb, GFP_ATOMIC);
1450
1451 bt_cb(skb)->retries++;
1452
1453 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1454 control &= __get_sar_mask(chan);
1455
1456 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1457 control |= __set_ctrl_final(chan);
1458
1459 control |= __set_reqseq(chan, chan->buffer_seq);
1460 control |= __set_txseq(chan, chan->next_tx_seq);
1461
1462 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1463
1464 if (chan->fcs == L2CAP_FCS_CRC16) {
1465 fcs = crc16(0, (u8 *)skb->data,
1466 tx_skb->len - L2CAP_FCS_SIZE);
1467 put_unaligned_le16(fcs, skb->data +
1468 tx_skb->len - L2CAP_FCS_SIZE);
1469 }
1470
1471 l2cap_do_send(chan, tx_skb);
1472
1473 __set_retrans_timer(chan);
1474
1475 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1476
1477 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1478
1479 if (bt_cb(skb)->retries == 1) {
1480 chan->unacked_frames++;
1481
1482 if (!nsent++)
1483 __clear_ack_timer(chan);
1484 }
1485
1486 chan->frames_sent++;
1487
1488 if (skb_queue_is_last(&chan->tx_q, skb))
1489 chan->tx_send_head = NULL;
1490 else
1491 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1492 }
1493
1494 return nsent;
1495 }
1496
1497 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1498 {
1499 int ret;
1500
1501 if (!skb_queue_empty(&chan->tx_q))
1502 chan->tx_send_head = chan->tx_q.next;
1503
1504 chan->next_tx_seq = chan->expected_ack_seq;
1505 ret = l2cap_ertm_send(chan);
1506 return ret;
1507 }
1508
1509 static void __l2cap_send_ack(struct l2cap_chan *chan)
1510 {
1511 u32 control = 0;
1512
1513 control |= __set_reqseq(chan, chan->buffer_seq);
1514
1515 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1516 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1517 set_bit(CONN_RNR_SENT, &chan->conn_state);
1518 l2cap_send_sframe(chan, control);
1519 return;
1520 }
1521
1522 if (l2cap_ertm_send(chan) > 0)
1523 return;
1524
1525 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1526 l2cap_send_sframe(chan, control);
1527 }
1528
1529 static void l2cap_send_ack(struct l2cap_chan *chan)
1530 {
1531 __clear_ack_timer(chan);
1532 __l2cap_send_ack(chan);
1533 }
1534
1535 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1536 {
1537 struct srej_list *tail;
1538 u32 control;
1539
1540 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1541 control |= __set_ctrl_final(chan);
1542
1543 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1544 control |= __set_reqseq(chan, tail->tx_seq);
1545
1546 l2cap_send_sframe(chan, control);
1547 }
1548
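/* Copy user data from the iovec into the skb, allocating continuation
 * fragments sized to the HCI MTU for anything that does not fit.
 */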
1549 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1550 struct msghdr *msg, int len,
1551 int count, struct sk_buff *skb)
1552 {
1553 struct l2cap_conn *conn = chan->conn;
1554 struct sk_buff **frag;
1555 int err, sent = 0;
1556
1557 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1558 return -EFAULT;
1559
1560 sent += count;
1561 len -= count;
1562
1563 /* Continuation fragments (no L2CAP header) */
1564 frag = &skb_shinfo(skb)->frag_list;
1565 while (len) {
1566 count = min_t(unsigned int, conn->mtu, len);
1567
1568 *frag = chan->ops->alloc_skb(chan, count,
1569 msg->msg_flags & MSG_DONTWAIT,
1570 &err);
1571
1572 if (!*frag)
1573 return err;
1574 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1575 return -EFAULT;
1576
1577 (*frag)->priority = skb->priority;
1578
1579 sent += count;
1580 len -= count;
1581
1582 frag = &(*frag)->next;
1583 }
1584
1585 return sent;
1586 }
1587
1588 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1589 struct msghdr *msg, size_t len,
1590 u32 priority)
1591 {
1592 struct l2cap_conn *conn = chan->conn;
1593 struct sk_buff *skb;
1594 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1595 struct l2cap_hdr *lh;
1596
1597 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1598
1599 count = min_t(unsigned int, (conn->mtu - hlen), len);
1600
1601 skb = chan->ops->alloc_skb(chan, count + hlen,
1602 msg->msg_flags & MSG_DONTWAIT, &err);
1603
1604 if (!skb)
1605 return ERR_PTR(err);
1606
1607 skb->priority = priority;
1608
1609 /* Create L2CAP header */
1610 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1611 lh->cid = cpu_to_le16(chan->dcid);
1612 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1613 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1614
1615 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1616 if (unlikely(err < 0)) {
1617 kfree_skb(skb);
1618 return ERR_PTR(err);
1619 }
1620 return skb;
1621 }
1622
1623 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1624 struct msghdr *msg, size_t len,
1625 u32 priority)
1626 {
1627 struct l2cap_conn *conn = chan->conn;
1628 struct sk_buff *skb;
1629 int err, count, hlen = L2CAP_HDR_SIZE;
1630 struct l2cap_hdr *lh;
1631
1632 BT_DBG("chan %p len %d", chan, (int)len);
1633
1634 count = min_t(unsigned int, (conn->mtu - hlen), len);
1635
1636 skb = chan->ops->alloc_skb(chan, count + hlen,
1637 msg->msg_flags & MSG_DONTWAIT, &err);
1638
1639 if (!skb)
1640 return ERR_PTR(err);
1641
1642 skb->priority = priority;
1643
1644 /* Create L2CAP header */
1645 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1646 lh->cid = cpu_to_le16(chan->dcid);
1647 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1648
1649 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1650 if (unlikely(err < 0)) {
1651 kfree_skb(skb);
1652 return ERR_PTR(err);
1653 }
1654 return skb;
1655 }
1656
1657 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1658 struct msghdr *msg, size_t len,
1659 u32 control, u16 sdulen)
1660 {
1661 struct l2cap_conn *conn = chan->conn;
1662 struct sk_buff *skb;
1663 int err, count, hlen;
1664 struct l2cap_hdr *lh;
1665
1666 BT_DBG("chan %p len %d", chan, (int)len);
1667
1668 if (!conn)
1669 return ERR_PTR(-ENOTCONN);
1670
1671 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1672 hlen = L2CAP_EXT_HDR_SIZE;
1673 else
1674 hlen = L2CAP_ENH_HDR_SIZE;
1675
1676 if (sdulen)
1677 hlen += L2CAP_SDULEN_SIZE;
1678
1679 if (chan->fcs == L2CAP_FCS_CRC16)
1680 hlen += L2CAP_FCS_SIZE;
1681
1682 count = min_t(unsigned int, (conn->mtu - hlen), len);
1683
1684 skb = chan->ops->alloc_skb(chan, count + hlen,
1685 msg->msg_flags & MSG_DONTWAIT, &err);
1686
1687 if (!skb)
1688 return ERR_PTR(err);
1689
1690 /* Create L2CAP header */
1691 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1692 lh->cid = cpu_to_le16(chan->dcid);
1693 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1694
1695 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1696
1697 if (sdulen)
1698 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1699
1700 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1701 if (unlikely(err < 0)) {
1702 kfree_skb(skb);
1703 return ERR_PTR(err);
1704 }
1705
1706 if (chan->fcs == L2CAP_FCS_CRC16)
1707 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1708
1709 bt_cb(skb)->retries = 0;
1710 return skb;
1711 }
1712
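/* Segment an SDU larger than the remote MPS into Start/Continue/End
 * I-frames and append them to the transmit queue.
 */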
1713 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1714 {
1715 struct sk_buff *skb;
1716 struct sk_buff_head sar_queue;
1717 u32 control;
1718 size_t size = 0;
1719
1720 skb_queue_head_init(&sar_queue);
1721 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1722 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1723 if (IS_ERR(skb))
1724 return PTR_ERR(skb);
1725
1726 __skb_queue_tail(&sar_queue, skb);
1727 len -= chan->remote_mps;
1728 size += chan->remote_mps;
1729
1730 while (len > 0) {
1731 size_t buflen;
1732
1733 if (len > chan->remote_mps) {
1734 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1735 buflen = chan->remote_mps;
1736 } else {
1737 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1738 buflen = len;
1739 }
1740
1741 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1742 if (IS_ERR(skb)) {
1743 skb_queue_purge(&sar_queue);
1744 return PTR_ERR(skb);
1745 }
1746
1747 __skb_queue_tail(&sar_queue, skb);
1748 len -= buflen;
1749 size += buflen;
1750 }
1751 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1752 if (chan->tx_send_head == NULL)
1753 chan->tx_send_head = sar_queue.next;
1754
1755 return size;
1756 }
1757
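/* Entry point for outgoing data: build connectionless, basic or
 * ERTM/streaming PDUs according to the channel mode and queue or
 * send them.
 */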
1758 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1759 u32 priority)
1760 {
1761 struct sk_buff *skb;
1762 u32 control;
1763 int err;
1764
1765 /* Connectionless channel */
1766 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1767 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1768 if (IS_ERR(skb))
1769 return PTR_ERR(skb);
1770
1771 l2cap_do_send(chan, skb);
1772 return len;
1773 }
1774
1775 switch (chan->mode) {
1776 case L2CAP_MODE_BASIC:
1777 /* Check outgoing MTU */
1778 if (len > chan->omtu)
1779 return -EMSGSIZE;
1780
1781 /* Create a basic PDU */
1782 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1783 if (IS_ERR(skb))
1784 return PTR_ERR(skb);
1785
1786 l2cap_do_send(chan, skb);
1787 err = len;
1788 break;
1789
1790 case L2CAP_MODE_ERTM:
1791 case L2CAP_MODE_STREAMING:
1792 /* Entire SDU fits into one PDU */
1793 if (len <= chan->remote_mps) {
1794 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1795 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1796 0);
1797 if (IS_ERR(skb))
1798 return PTR_ERR(skb);
1799
1800 __skb_queue_tail(&chan->tx_q, skb);
1801
1802 if (chan->tx_send_head == NULL)
1803 chan->tx_send_head = skb;
1804
1805 } else {
1806 /* Segment SDU into multiple PDUs */
1807 err = l2cap_sar_segment_sdu(chan, msg, len);
1808 if (err < 0)
1809 return err;
1810 }
1811
1812 if (chan->mode == L2CAP_MODE_STREAMING) {
1813 l2cap_streaming_send(chan);
1814 err = len;
1815 break;
1816 }
1817
1818 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1819 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1820 err = len;
1821 break;
1822 }
1823
1824 err = l2cap_ertm_send(chan);
1825 if (err >= 0)
1826 err = len;
1827
1828 break;
1829
1830 default:
1831 BT_DBG("bad state %1.1x", chan->mode);
1832 err = -EBADFD;
1833 }
1834
1835 return err;
1836 }
1837
1838 /* Copy frame to all raw sockets on that connection */
1839 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1840 {
1841 struct sk_buff *nskb;
1842 struct l2cap_chan *chan;
1843
1844 BT_DBG("conn %p", conn);
1845
1846 mutex_lock(&conn->chan_lock);
1847
1848 list_for_each_entry(chan, &conn->chan_l, list) {
1849 struct sock *sk = chan->sk;
1850 if (chan->chan_type != L2CAP_CHAN_RAW)
1851 continue;
1852
1853 /* Don't send frame to the socket it came from */
1854 if (skb->sk == sk)
1855 continue;
1856 nskb = skb_clone(skb, GFP_ATOMIC);
1857 if (!nskb)
1858 continue;
1859
1860 if (chan->ops->recv(chan->data, nskb))
1861 kfree_skb(nskb);
1862 }
1863
1864 mutex_unlock(&conn->chan_lock);
1865 }
1866
1867 /* ---- L2CAP signalling commands ---- */
1868 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1869 u8 code, u8 ident, u16 dlen, void *data)
1870 {
1871 struct sk_buff *skb, **frag;
1872 struct l2cap_cmd_hdr *cmd;
1873 struct l2cap_hdr *lh;
1874 int len, count;
1875
1876 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1877 conn, code, ident, dlen);
1878
1879 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1880 count = min_t(unsigned int, conn->mtu, len);
1881
1882 skb = bt_skb_alloc(count, GFP_ATOMIC);
1883 if (!skb)
1884 return NULL;
1885
1886 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1887 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1888
1889 if (conn->hcon->type == LE_LINK)
1890 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1891 else
1892 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1893
1894 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1895 cmd->code = code;
1896 cmd->ident = ident;
1897 cmd->len = cpu_to_le16(dlen);
1898
1899 if (dlen) {
1900 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1901 memcpy(skb_put(skb, count), data, count);
1902 data += count;
1903 }
1904
1905 len -= skb->len;
1906
1907 /* Continuation fragments (no L2CAP header) */
1908 frag = &skb_shinfo(skb)->frag_list;
1909 while (len) {
1910 count = min_t(unsigned int, conn->mtu, len);
1911
1912 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1913 if (!*frag)
1914 goto fail;
1915
1916 memcpy(skb_put(*frag, count), data, count);
1917
1918 len -= count;
1919 data += count;
1920
1921 frag = &(*frag)->next;
1922 }
1923
1924 return skb;
1925
1926 fail:
1927 kfree_skb(skb);
1928 return NULL;
1929 }
1930
1931 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1932 {
1933 struct l2cap_conf_opt *opt = *ptr;
1934 int len;
1935
1936 len = L2CAP_CONF_OPT_SIZE + opt->len;
1937 *ptr += len;
1938
1939 *type = opt->type;
1940 *olen = opt->len;
1941
1942 switch (opt->len) {
1943 case 1:
1944 *val = *((u8 *) opt->val);
1945 break;
1946
1947 case 2:
1948 *val = get_unaligned_le16(opt->val);
1949 break;
1950
1951 case 4:
1952 *val = get_unaligned_le32(opt->val);
1953 break;
1954
1955 default:
1956 *val = (unsigned long) opt->val;
1957 break;
1958 }
1959
1960 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1961 return len;
1962 }
1963
1964 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1965 {
1966 struct l2cap_conf_opt *opt = *ptr;
1967
1968 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1969
1970 opt->type = type;
1971 opt->len = len;
1972
1973 switch (len) {
1974 case 1:
1975 *((u8 *) opt->val) = val;
1976 break;
1977
1978 case 2:
1979 put_unaligned_le16(val, opt->val);
1980 break;
1981
1982 case 4:
1983 put_unaligned_le32(val, opt->val);
1984 break;
1985
1986 default:
1987 memcpy(opt->val, (void *) val, len);
1988 break;
1989 }
1990
1991 *ptr += L2CAP_CONF_OPT_SIZE + len;
1992 }
1993
1994 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1995 {
1996 struct l2cap_conf_efs efs;
1997
1998 switch (chan->mode) {
1999 case L2CAP_MODE_ERTM:
2000 efs.id = chan->local_id;
2001 efs.stype = chan->local_stype;
2002 efs.msdu = cpu_to_le16(chan->local_msdu);
2003 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2004 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2005 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2006 break;
2007
2008 case L2CAP_MODE_STREAMING:
2009 efs.id = 1;
2010 efs.stype = L2CAP_SERV_BESTEFFORT;
2011 efs.msdu = cpu_to_le16(chan->local_msdu);
2012 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2013 efs.acc_lat = 0;
2014 efs.flush_to = 0;
2015 break;
2016
2017 default:
2018 return;
2019 }
2020
2021 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2022 (unsigned long) &efs);
2023 }
2024
2025 static void l2cap_ack_timeout(struct work_struct *work)
2026 {
2027 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2028 ack_timer.work);
2029
2030 BT_DBG("chan %p", chan);
2031
2032 l2cap_chan_lock(chan);
2033
2034 __l2cap_send_ack(chan);
2035
2036 l2cap_chan_unlock(chan);
2037
2038 l2cap_chan_put(chan);
2039 }
2040
2041 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2042 {
2043 chan->expected_ack_seq = 0;
2044 chan->unacked_frames = 0;
2045 chan->buffer_seq = 0;
2046 chan->num_acked = 0;
2047 chan->frames_sent = 0;
2048
2049 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2050 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2051 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2052
2053 skb_queue_head_init(&chan->srej_q);
2054
2055 INIT_LIST_HEAD(&chan->srej_l);
2056 }
2057
2058 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2059 {
2060 switch (mode) {
2061 case L2CAP_MODE_STREAMING:
2062 case L2CAP_MODE_ERTM:
2063 if (l2cap_mode_supported(mode, remote_feat_mask))
2064 return mode;
2065 /* fall through */
2066 default:
2067 return L2CAP_MODE_BASIC;
2068 }
2069 }
2070
2071 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2072 {
2073 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2074 }
2075
2076 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2077 {
2078 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2079 }
2080
2081 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2082 {
2083 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2084 __l2cap_ews_supported(chan)) {
2085 /* use extended control field */
2086 set_bit(FLAG_EXT_CTRL, &chan->flags);
2087 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2088 } else {
2089 chan->tx_win = min_t(u16, chan->tx_win,
2090 L2CAP_DEFAULT_TX_WINDOW);
2091 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2092 }
2093 }
2094
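/* Build our Configure Request: the MTU option plus mode-specific RFC,
 * FCS, EFS and extended window size options.
 */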
2095 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2096 {
2097 struct l2cap_conf_req *req = data;
2098 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2099 void *ptr = req->data;
2100 u16 size;
2101
2102 BT_DBG("chan %p", chan);
2103
2104 if (chan->num_conf_req || chan->num_conf_rsp)
2105 goto done;
2106
2107 switch (chan->mode) {
2108 case L2CAP_MODE_STREAMING:
2109 case L2CAP_MODE_ERTM:
2110 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2111 break;
2112
2113 if (__l2cap_efs_supported(chan))
2114 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2115
2116 /* fall through */
2117 default:
2118 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2119 break;
2120 }
2121
2122 done:
2123 if (chan->imtu != L2CAP_DEFAULT_MTU)
2124 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2125
2126 switch (chan->mode) {
2127 case L2CAP_MODE_BASIC:
2128 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2129 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2130 break;
2131
2132 rfc.mode = L2CAP_MODE_BASIC;
2133 rfc.txwin_size = 0;
2134 rfc.max_transmit = 0;
2135 rfc.retrans_timeout = 0;
2136 rfc.monitor_timeout = 0;
2137 rfc.max_pdu_size = 0;
2138
2139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2140 (unsigned long) &rfc);
2141 break;
2142
2143 case L2CAP_MODE_ERTM:
2144 rfc.mode = L2CAP_MODE_ERTM;
2145 rfc.max_transmit = chan->max_tx;
2146 rfc.retrans_timeout = 0;
2147 rfc.monitor_timeout = 0;
2148
2149 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2150 L2CAP_EXT_HDR_SIZE -
2151 L2CAP_SDULEN_SIZE -
2152 L2CAP_FCS_SIZE);
2153 rfc.max_pdu_size = cpu_to_le16(size);
2154
2155 l2cap_txwin_setup(chan);
2156
2157 rfc.txwin_size = min_t(u16, chan->tx_win,
2158 L2CAP_DEFAULT_TX_WINDOW);
2159
2160 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2161 (unsigned long) &rfc);
2162
2163 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2164 l2cap_add_opt_efs(&ptr, chan);
2165
2166 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2167 break;
2168
2169 if (chan->fcs == L2CAP_FCS_NONE ||
2170 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2171 chan->fcs = L2CAP_FCS_NONE;
2172 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2173 }
2174
2175 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2176 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2177 chan->tx_win);
2178 break;
2179
2180 case L2CAP_MODE_STREAMING:
2181 rfc.mode = L2CAP_MODE_STREAMING;
2182 rfc.txwin_size = 0;
2183 rfc.max_transmit = 0;
2184 rfc.retrans_timeout = 0;
2185 rfc.monitor_timeout = 0;
2186
2187 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2188 L2CAP_EXT_HDR_SIZE -
2189 L2CAP_SDULEN_SIZE -
2190 L2CAP_FCS_SIZE);
2191 rfc.max_pdu_size = cpu_to_le16(size);
2192
2193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2194 (unsigned long) &rfc);
2195
2196 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2197 l2cap_add_opt_efs(&ptr, chan);
2198
2199 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2200 break;
2201
2202 if (chan->fcs == L2CAP_FCS_NONE ||
2203 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2204 chan->fcs = L2CAP_FCS_NONE;
2205 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2206 }
2207 break;
2208 }
2209
2210 req->dcid = cpu_to_le16(chan->dcid);
2211 req->flags = cpu_to_le16(0);
2212
2213 return ptr - data;
2214 }
2215
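/* Parse the peer's Configure Request and build our response, accepting
 * or adjusting the MTU, RFC mode, FCS, EFS and EWS options.
 */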
2216 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2217 {
2218 struct l2cap_conf_rsp *rsp = data;
2219 void *ptr = rsp->data;
2220 void *req = chan->conf_req;
2221 int len = chan->conf_len;
2222 int type, hint, olen;
2223 unsigned long val;
2224 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2225 struct l2cap_conf_efs efs;
2226 u8 remote_efs = 0;
2227 u16 mtu = L2CAP_DEFAULT_MTU;
2228 u16 result = L2CAP_CONF_SUCCESS;
2229 u16 size;
2230
2231 BT_DBG("chan %p", chan);
2232
2233 while (len >= L2CAP_CONF_OPT_SIZE) {
2234 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2235
2236 hint = type & L2CAP_CONF_HINT;
2237 type &= L2CAP_CONF_MASK;
2238
2239 switch (type) {
2240 case L2CAP_CONF_MTU:
2241 mtu = val;
2242 break;
2243
2244 case L2CAP_CONF_FLUSH_TO:
2245 chan->flush_to = val;
2246 break;
2247
2248 case L2CAP_CONF_QOS:
2249 break;
2250
2251 case L2CAP_CONF_RFC:
2252 if (olen == sizeof(rfc))
2253 memcpy(&rfc, (void *) val, olen);
2254 break;
2255
2256 case L2CAP_CONF_FCS:
2257 if (val == L2CAP_FCS_NONE)
2258 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2259 break;
2260
2261 case L2CAP_CONF_EFS:
2262 remote_efs = 1;
2263 if (olen == sizeof(efs))
2264 memcpy(&efs, (void *) val, olen);
2265 break;
2266
2267 case L2CAP_CONF_EWS:
2268 if (!enable_hs)
2269 return -ECONNREFUSED;
2270
2271 set_bit(FLAG_EXT_CTRL, &chan->flags);
2272 set_bit(CONF_EWS_RECV, &chan->conf_state);
2273 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2274 chan->remote_tx_win = val;
2275 break;
2276
2277 default:
2278 if (hint)
2279 break;
2280
2281 result = L2CAP_CONF_UNKNOWN;
2282 *((u8 *) ptr++) = type;
2283 break;
2284 }
2285 }
2286
2287 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2288 goto done;
2289
2290 switch (chan->mode) {
2291 case L2CAP_MODE_STREAMING:
2292 case L2CAP_MODE_ERTM:
2293 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2294 chan->mode = l2cap_select_mode(rfc.mode,
2295 chan->conn->feat_mask);
2296 break;
2297 }
2298
2299 if (remote_efs) {
2300 if (__l2cap_efs_supported(chan))
2301 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2302 else
2303 return -ECONNREFUSED;
2304 }
2305
2306 if (chan->mode != rfc.mode)
2307 return -ECONNREFUSED;
2308
2309 break;
2310 }
2311
2312 done:
2313 if (chan->mode != rfc.mode) {
2314 result = L2CAP_CONF_UNACCEPT;
2315 rfc.mode = chan->mode;
2316
2317 if (chan->num_conf_rsp == 1)
2318 return -ECONNREFUSED;
2319
2320 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2321 sizeof(rfc), (unsigned long) &rfc);
2322 }
2323
2324 if (result == L2CAP_CONF_SUCCESS) {
2325 /* Configure output options and let the other side know
2326 * which ones we don't like. */
2327
2328 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2329 result = L2CAP_CONF_UNACCEPT;
2330 else {
2331 chan->omtu = mtu;
2332 set_bit(CONF_MTU_DONE, &chan->conf_state);
2333 }
2334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2335
2336 if (remote_efs) {
2337 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2338 efs.stype != L2CAP_SERV_NOTRAFIC &&
2339 efs.stype != chan->local_stype) {
2340
2341 result = L2CAP_CONF_UNACCEPT;
2342
2343 if (chan->num_conf_req >= 1)
2344 return -ECONNREFUSED;
2345
2346 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2347 sizeof(efs),
2348 (unsigned long) &efs);
2349 } else {
2350 /* Send PENDING Conf Rsp */
2351 result = L2CAP_CONF_PENDING;
2352 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2353 }
2354 }
2355
2356 switch (rfc.mode) {
2357 case L2CAP_MODE_BASIC:
2358 chan->fcs = L2CAP_FCS_NONE;
2359 set_bit(CONF_MODE_DONE, &chan->conf_state);
2360 break;
2361
2362 case L2CAP_MODE_ERTM:
2363 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2364 chan->remote_tx_win = rfc.txwin_size;
2365 else
2366 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2367
2368 chan->remote_max_tx = rfc.max_transmit;
2369
2370 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2371 chan->conn->mtu -
2372 L2CAP_EXT_HDR_SIZE -
2373 L2CAP_SDULEN_SIZE -
2374 L2CAP_FCS_SIZE);
2375 rfc.max_pdu_size = cpu_to_le16(size);
2376 chan->remote_mps = size;
2377
2378 rfc.retrans_timeout =
2379 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2380 rfc.monitor_timeout =
2381 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2382
2383 set_bit(CONF_MODE_DONE, &chan->conf_state);
2384
2385 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2386 sizeof(rfc), (unsigned long) &rfc);
2387
2388 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2389 chan->remote_id = efs.id;
2390 chan->remote_stype = efs.stype;
2391 chan->remote_msdu = le16_to_cpu(efs.msdu);
2392 chan->remote_flush_to =
2393 le32_to_cpu(efs.flush_to);
2394 chan->remote_acc_lat =
2395 le32_to_cpu(efs.acc_lat);
2396 chan->remote_sdu_itime =
2397 le32_to_cpu(efs.sdu_itime);
2398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2399 sizeof(efs), (unsigned long) &efs);
2400 }
2401 break;
2402
2403 case L2CAP_MODE_STREAMING:
2404 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2405 chan->conn->mtu -
2406 L2CAP_EXT_HDR_SIZE -
2407 L2CAP_SDULEN_SIZE -
2408 L2CAP_FCS_SIZE);
2409 rfc.max_pdu_size = cpu_to_le16(size);
2410 chan->remote_mps = size;
2411
2412 set_bit(CONF_MODE_DONE, &chan->conf_state);
2413
2414 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2415 sizeof(rfc), (unsigned long) &rfc);
2416
2417 break;
2418
2419 default:
2420 result = L2CAP_CONF_UNACCEPT;
2421
2422 memset(&rfc, 0, sizeof(rfc));
2423 rfc.mode = chan->mode;
2424 }
2425
2426 if (result == L2CAP_CONF_SUCCESS)
2427 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2428 }
2429 rsp->scid = cpu_to_le16(chan->dcid);
2430 rsp->result = cpu_to_le16(result);
2431 rsp->flags = cpu_to_le16(0x0000);
2432
2433 return ptr - data;
2434 }
2435
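/* Walk the options of a Configuration Response and build a follow-up
 * Configuration Request in 'data', adopting the values proposed by the
 * remote side (MTU, flush timeout, RFC, EWS, EFS) and recording the
 * negotiated ERTM/streaming parameters. Returns the length of the new
 * request, or -ECONNREFUSED if the response is unacceptable.
 */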
2436 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2437 {
2438 struct l2cap_conf_req *req = data;
2439 void *ptr = req->data;
2440 int type, olen;
2441 unsigned long val;
2442 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2443 struct l2cap_conf_efs efs;
2444
2445 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2446
2447 while (len >= L2CAP_CONF_OPT_SIZE) {
2448 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2449
2450 switch (type) {
2451 case L2CAP_CONF_MTU:
2452 if (val < L2CAP_DEFAULT_MIN_MTU) {
2453 *result = L2CAP_CONF_UNACCEPT;
2454 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2455 } else
2456 chan->imtu = val;
2457 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2458 break;
2459
2460 case L2CAP_CONF_FLUSH_TO:
2461 chan->flush_to = val;
2462 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2463 2, chan->flush_to);
2464 break;
2465
2466 case L2CAP_CONF_RFC:
2467 if (olen == sizeof(rfc))
2468 memcpy(&rfc, (void *)val, olen);
2469
2470 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2471 rfc.mode != chan->mode)
2472 return -ECONNREFUSED;
2473
2474 chan->fcs = 0;
2475
2476 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2477 sizeof(rfc), (unsigned long) &rfc);
2478 break;
2479
2480 case L2CAP_CONF_EWS:
2481 chan->tx_win = min_t(u16, val,
2482 L2CAP_DEFAULT_EXT_WINDOW);
2483 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2484 chan->tx_win);
2485 break;
2486
2487 case L2CAP_CONF_EFS:
2488 if (olen == sizeof(efs))
2489 memcpy(&efs, (void *)val, olen);
2490
2491 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2492 efs.stype != L2CAP_SERV_NOTRAFIC &&
2493 efs.stype != chan->local_stype)
2494 return -ECONNREFUSED;
2495
2496 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2497 sizeof(efs), (unsigned long) &efs);
2498 break;
2499 }
2500 }
2501
2502 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2503 return -ECONNREFUSED;
2504
2505 chan->mode = rfc.mode;
2506
2507 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2508 switch (rfc.mode) {
2509 case L2CAP_MODE_ERTM:
2510 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2511 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2512 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2513
2514 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2515 chan->local_msdu = le16_to_cpu(efs.msdu);
2516 chan->local_sdu_itime =
2517 le32_to_cpu(efs.sdu_itime);
2518 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2519 chan->local_flush_to =
2520 le32_to_cpu(efs.flush_to);
2521 }
2522 break;
2523
2524 case L2CAP_MODE_STREAMING:
2525 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2526 }
2527 }
2528
2529 req->dcid = cpu_to_le16(chan->dcid);
2530 req->flags = cpu_to_le16(0x0000);
2531
2532 return ptr - data;
2533 }
2534
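/* Build a bare Configuration Response that carries only the given
 * result and continuation flags, with no options attached.
 */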
2535 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2536 {
2537 struct l2cap_conf_rsp *rsp = data;
2538 void *ptr = rsp->data;
2539
2540 BT_DBG("chan %p", chan);
2541
2542 rsp->scid = cpu_to_le16(chan->dcid);
2543 rsp->result = cpu_to_le16(result);
2544 rsp->flags = cpu_to_le16(flags);
2545
2546 return ptr - data;
2547 }
2548
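/* Send the deferred successful Connection Response for this channel and,
 * if we have not already done so, start configuration by sending the
 * first Configuration Request.
 */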
2549 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2550 {
2551 struct l2cap_conn_rsp rsp;
2552 struct l2cap_conn *conn = chan->conn;
2553 u8 buf[128];
2554
2555 rsp.scid = cpu_to_le16(chan->dcid);
2556 rsp.dcid = cpu_to_le16(chan->scid);
2557 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2558 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2559 l2cap_send_cmd(conn, chan->ident,
2560 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2561
2562 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2563 return;
2564
2565 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2566 l2cap_build_conf_req(chan, buf), buf);
2567 chan->num_conf_req++;
2568 }
2569
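/* Pull the RFC option out of a Configuration Response and record the
 * resulting retransmission/monitor timeouts and MPS for ERTM or
 * streaming mode; sane defaults are used (and an error logged) when the
 * remote left the option out.
 */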
2570 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2571 {
2572 int type, olen;
2573 unsigned long val;
2574 struct l2cap_conf_rfc rfc;
2575
2576 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2577
2578 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2579 return;
2580
2581 while (len >= L2CAP_CONF_OPT_SIZE) {
2582 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2583
2584 switch (type) {
2585 case L2CAP_CONF_RFC:
2586 if (olen == sizeof(rfc))
2587 memcpy(&rfc, (void *)val, olen);
2588 goto done;
2589 }
2590 }
2591
2592 /* Use sane default values in case a misbehaving remote device
2593 * did not send an RFC option.
2594 */
2595 rfc.mode = chan->mode;
2596 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2597 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2598 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2599
2600 BT_ERR("Expected RFC option was not found, using defaults");
2601
2602 done:
2603 switch (rfc.mode) {
2604 case L2CAP_MODE_ERTM:
2605 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2606 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2607 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2608 break;
2609 case L2CAP_MODE_STREAMING:
2610 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2611 }
2612 }
2613
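/* Handle a Command Reject: if it answers our outstanding Information
 * Request, stop waiting for the remote feature mask and start the
 * pending connections anyway.
 */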
2614 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2615 {
2616 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2617
2618 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2619 return 0;
2620
2621 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2622 cmd->ident == conn->info_ident) {
2623 cancel_delayed_work(&conn->info_timer);
2624
2625 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2626 conn->info_ident = 0;
2627
2628 l2cap_conn_start(conn);
2629 }
2630
2631 return 0;
2632 }
2633
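/* Handle an incoming Connection Request: look up a channel listening on
 * the PSM, run security and backlog checks, create the new channel and
 * answer with a Connection Response, starting the information request
 * or configuration phase when appropriate.
 */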
2634 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2635 {
2636 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2637 struct l2cap_conn_rsp rsp;
2638 struct l2cap_chan *chan = NULL, *pchan;
2639 struct sock *parent, *sk = NULL;
2640 int result, status = L2CAP_CS_NO_INFO;
2641
2642 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2643 __le16 psm = req->psm;
2644
2645 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2646
2647 /* Check if we have a socket listening on this psm */
2648 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2649 if (!pchan) {
2650 result = L2CAP_CR_BAD_PSM;
2651 goto sendresp;
2652 }
2653
2654 parent = pchan->sk;
2655
2656 mutex_lock(&conn->chan_lock);
2657 lock_sock(parent);
2658
2659 /* Check if the ACL is secure enough (if not SDP) */
2660 if (psm != cpu_to_le16(0x0001) &&
2661 !hci_conn_check_link_mode(conn->hcon)) {
2662 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2663 result = L2CAP_CR_SEC_BLOCK;
2664 goto response;
2665 }
2666
2667 result = L2CAP_CR_NO_MEM;
2668
2669 /* Check for backlog size */
2670 if (sk_acceptq_is_full(parent)) {
2671 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2672 goto response;
2673 }
2674
2675 chan = pchan->ops->new_connection(pchan->data);
2676 if (!chan)
2677 goto response;
2678
2679 sk = chan->sk;
2680
2681 /* Check if we already have a channel with that dcid */
2682 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2683 sock_set_flag(sk, SOCK_ZAPPED);
2684 chan->ops->close(chan->data);
2685 goto response;
2686 }
2687
2688 hci_conn_hold(conn->hcon);
2689
2690 bacpy(&bt_sk(sk)->src, conn->src);
2691 bacpy(&bt_sk(sk)->dst, conn->dst);
2692 chan->psm = psm;
2693 chan->dcid = scid;
2694
2695 bt_accept_enqueue(parent, sk);
2696
2697 __l2cap_chan_add(conn, chan);
2698
2699 dcid = chan->scid;
2700
2701 __set_chan_timer(chan, sk->sk_sndtimeo);
2702
2703 chan->ident = cmd->ident;
2704
2705 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2706 if (l2cap_chan_check_security(chan)) {
2707 if (bt_sk(sk)->defer_setup) {
2708 __l2cap_state_change(chan, BT_CONNECT2);
2709 result = L2CAP_CR_PEND;
2710 status = L2CAP_CS_AUTHOR_PEND;
2711 parent->sk_data_ready(parent, 0);
2712 } else {
2713 __l2cap_state_change(chan, BT_CONFIG);
2714 result = L2CAP_CR_SUCCESS;
2715 status = L2CAP_CS_NO_INFO;
2716 }
2717 } else {
2718 __l2cap_state_change(chan, BT_CONNECT2);
2719 result = L2CAP_CR_PEND;
2720 status = L2CAP_CS_AUTHEN_PEND;
2721 }
2722 } else {
2723 __l2cap_state_change(chan, BT_CONNECT2);
2724 result = L2CAP_CR_PEND;
2725 status = L2CAP_CS_NO_INFO;
2726 }
2727
2728 response:
2729 release_sock(parent);
2730 mutex_unlock(&conn->chan_lock);
2731
2732 sendresp:
2733 rsp.scid = cpu_to_le16(scid);
2734 rsp.dcid = cpu_to_le16(dcid);
2735 rsp.result = cpu_to_le16(result);
2736 rsp.status = cpu_to_le16(status);
2737 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2738
2739 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2740 struct l2cap_info_req info;
2741 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2742
2743 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2744 conn->info_ident = l2cap_get_ident(conn);
2745
2746 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2747
2748 l2cap_send_cmd(conn, conn->info_ident,
2749 L2CAP_INFO_REQ, sizeof(info), &info);
2750 }
2751
2752 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2753 result == L2CAP_CR_SUCCESS) {
2754 u8 buf[128];
2755 set_bit(CONF_REQ_SENT, &chan->conf_state);
2756 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2757 l2cap_build_conf_req(chan, buf), buf);
2758 chan->num_conf_req++;
2759 }
2760
2761 return 0;
2762 }
2763
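/* Handle a Connection Response: on success move the channel into the
 * configuration phase and send the first Configuration Request, on a
 * pending result just note it, and on any failure delete the channel.
 */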
2764 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2765 {
2766 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2767 u16 scid, dcid, result, status;
2768 struct l2cap_chan *chan;
2769 u8 req[128];
2770 int err;
2771
2772 scid = __le16_to_cpu(rsp->scid);
2773 dcid = __le16_to_cpu(rsp->dcid);
2774 result = __le16_to_cpu(rsp->result);
2775 status = __le16_to_cpu(rsp->status);
2776
2777 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2778 dcid, scid, result, status);
2779
2780 mutex_lock(&conn->chan_lock);
2781
2782 if (scid) {
2783 chan = __l2cap_get_chan_by_scid(conn, scid);
2784 if (!chan) {
2785 err = -EFAULT;
2786 goto unlock;
2787 }
2788 } else {
2789 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2790 if (!chan) {
2791 err = -EFAULT;
2792 goto unlock;
2793 }
2794 }
2795
2796 err = 0;
2797
2798 l2cap_chan_lock(chan);
2799
2800 switch (result) {
2801 case L2CAP_CR_SUCCESS:
2802 l2cap_state_change(chan, BT_CONFIG);
2803 chan->ident = 0;
2804 chan->dcid = dcid;
2805 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2806
2807 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2808 break;
2809
2810 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2811 l2cap_build_conf_req(chan, req), req);
2812 chan->num_conf_req++;
2813 break;
2814
2815 case L2CAP_CR_PEND:
2816 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2817 break;
2818
2819 default:
2820 l2cap_chan_del(chan, ECONNREFUSED);
2821 break;
2822 }
2823
2824 l2cap_chan_unlock(chan);
2825
2826 unlock:
2827 mutex_unlock(&conn->chan_lock);
2828
2829 return err;
2830 }
2831
2832 static inline void set_default_fcs(struct l2cap_chan *chan)
2833 {
2834 /* FCS is enabled only in ERTM or streaming mode, and only if one or
2835 * both sides request it.
2836 */
2837 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2838 chan->fcs = L2CAP_FCS_NONE;
2839 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2840 chan->fcs = L2CAP_FCS_CRC16;
2841 }
2842
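/* Handle a Configuration Request: accumulate option data across
 * continuation packets in chan->conf_req, then parse the complete set,
 * send our Configuration Response and, once both directions are
 * configured, bring the channel up.
 */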
2843 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2844 {
2845 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2846 u16 dcid, flags;
2847 u8 rsp[64];
2848 struct l2cap_chan *chan;
2849 int len;
2850
2851 dcid = __le16_to_cpu(req->dcid);
2852 flags = __le16_to_cpu(req->flags);
2853
2854 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2855
2856 chan = l2cap_get_chan_by_scid(conn, dcid);
2857 if (!chan)
2858 return -ENOENT;
2859
2860 l2cap_chan_lock(chan);
2861
2862 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2863 struct l2cap_cmd_rej_cid rej;
2864
2865 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2866 rej.scid = cpu_to_le16(chan->scid);
2867 rej.dcid = cpu_to_le16(chan->dcid);
2868
2869 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2870 sizeof(rej), &rej);
2871 goto unlock;
2872 }
2873
2874 /* Reject if config buffer is too small. */
2875 len = cmd_len - sizeof(*req);
2876 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2877 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2878 l2cap_build_conf_rsp(chan, rsp,
2879 L2CAP_CONF_REJECT, flags), rsp);
2880 goto unlock;
2881 }
2882
2883 /* Store config. */
2884 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2885 chan->conf_len += len;
2886
2887 if (flags & 0x0001) {
2888 /* Incomplete config. Send empty response. */
2889 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2890 l2cap_build_conf_rsp(chan, rsp,
2891 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2892 goto unlock;
2893 }
2894
2895 /* Complete config. */
2896 len = l2cap_parse_conf_req(chan, rsp);
2897 if (len < 0) {
2898 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2899 goto unlock;
2900 }
2901
2902 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2903 chan->num_conf_rsp++;
2904
2905 /* Reset config buffer. */
2906 chan->conf_len = 0;
2907
2908 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2909 goto unlock;
2910
2911 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2912 set_default_fcs(chan);
2913
2914 l2cap_state_change(chan, BT_CONNECTED);
2915
2916 chan->next_tx_seq = 0;
2917 chan->expected_tx_seq = 0;
2918 skb_queue_head_init(&chan->tx_q);
2919 if (chan->mode == L2CAP_MODE_ERTM)
2920 l2cap_ertm_init(chan);
2921
2922 l2cap_chan_ready(chan);
2923 goto unlock;
2924 }
2925
2926 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2927 u8 buf[64];
2928 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2929 l2cap_build_conf_req(chan, buf), buf);
2930 chan->num_conf_req++;
2931 }
2932
2933 /* Got Conf Rsp PENDING from remote side and assume we sent
2934 Conf Rsp PENDING in the code above */
2935 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2936 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2937
2938 /* check compatibility */
2939
2940 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2941 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2942
2943 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2944 l2cap_build_conf_rsp(chan, rsp,
2945 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2946 }
2947
2948 unlock:
2949 l2cap_chan_unlock(chan);
2950 return 0;
2951 }
2952
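/* Handle a Configuration Response: on success store the negotiated RFC
 * values, on PENDING or UNACCEPT build and send a new request, and on
 * any other result disconnect the channel.
 */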
2953 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2954 {
2955 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2956 u16 scid, flags, result;
2957 struct l2cap_chan *chan;
2958 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2959
2960 scid = __le16_to_cpu(rsp->scid);
2961 flags = __le16_to_cpu(rsp->flags);
2962 result = __le16_to_cpu(rsp->result);
2963
2964 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2965 scid, flags, result);
2966
2967 chan = l2cap_get_chan_by_scid(conn, scid);
2968 if (!chan)
2969 return 0;
2970
2971 l2cap_chan_lock(chan);
2972
2973 switch (result) {
2974 case L2CAP_CONF_SUCCESS:
2975 l2cap_conf_rfc_get(chan, rsp->data, len);
2976 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2977 break;
2978
2979 case L2CAP_CONF_PENDING:
2980 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2981
2982 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2983 char buf[64];
2984
2985 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2986 buf, &result);
2987 if (len < 0) {
2988 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2989 goto done;
2990 }
2991
2992 /* check compatibility */
2993
2994 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2995 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2996
2997 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2998 l2cap_build_conf_rsp(chan, buf,
2999 L2CAP_CONF_SUCCESS, 0x0000), buf);
3000 }
3001 goto done;
3002
3003 case L2CAP_CONF_UNACCEPT:
3004 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3005 char req[64];
3006
3007 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3008 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3009 goto done;
3010 }
3011
3012 /* throw out any old stored conf requests */
3013 result = L2CAP_CONF_SUCCESS;
3014 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3015 req, &result);
3016 if (len < 0) {
3017 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3018 goto done;
3019 }
3020
3021 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3022 L2CAP_CONF_REQ, len, req);
3023 chan->num_conf_req++;
3024 if (result != L2CAP_CONF_SUCCESS)
3025 goto done;
3026 break;
3027 }
3028
3029 default:
3030 l2cap_chan_set_err(chan, ECONNRESET);
3031
3032 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3033 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3034 goto done;
3035 }
3036
3037 if (flags & 0x01)
3038 goto done;
3039
3040 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3041
3042 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3043 set_default_fcs(chan);
3044
3045 l2cap_state_change(chan, BT_CONNECTED);
3046 chan->next_tx_seq = 0;
3047 chan->expected_tx_seq = 0;
3048 skb_queue_head_init(&chan->tx_q);
3049 if (chan->mode == L2CAP_MODE_ERTM)
3050 l2cap_ertm_init(chan);
3051
3052 l2cap_chan_ready(chan);
3053 }
3054
3055 done:
3056 l2cap_chan_unlock(chan);
3057 return 0;
3058 }
3059
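/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, shut the socket down and delete the channel.
 */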
3060 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3061 {
3062 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3063 struct l2cap_disconn_rsp rsp;
3064 u16 dcid, scid;
3065 struct l2cap_chan *chan;
3066 struct sock *sk;
3067
3068 scid = __le16_to_cpu(req->scid);
3069 dcid = __le16_to_cpu(req->dcid);
3070
3071 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3072
3073 mutex_lock(&conn->chan_lock);
3074
3075 chan = __l2cap_get_chan_by_scid(conn, dcid);
3076 if (!chan) {
3077 mutex_unlock(&conn->chan_lock);
3078 return 0;
3079 }
3080
3081 l2cap_chan_lock(chan);
3082
3083 sk = chan->sk;
3084
3085 rsp.dcid = cpu_to_le16(chan->scid);
3086 rsp.scid = cpu_to_le16(chan->dcid);
3087 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3088
3089 lock_sock(sk);
3090 sk->sk_shutdown = SHUTDOWN_MASK;
3091 release_sock(sk);
3092
3093 l2cap_chan_del(chan, ECONNRESET);
3094
3095 l2cap_chan_unlock(chan);
3096
3097 chan->ops->close(chan->data);
3098
3099 mutex_unlock(&conn->chan_lock);
3100
3101 return 0;
3102 }
3103
3104 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3105 {
3106 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3107 u16 dcid, scid;
3108 struct l2cap_chan *chan;
3109
3110 scid = __le16_to_cpu(rsp->scid);
3111 dcid = __le16_to_cpu(rsp->dcid);
3112
3113 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3114
3115 mutex_lock(&conn->chan_lock);
3116
3117 chan = __l2cap_get_chan_by_scid(conn, scid);
3118 if (!chan) {
3119 mutex_unlock(&conn->chan_lock);
3120 return 0;
3121 }
3122
3123 l2cap_chan_lock(chan);
3124
3125 l2cap_chan_del(chan, 0);
3126
3127 l2cap_chan_unlock(chan);
3128
3129 chan->ops->close(chan->data);
3130
3131 mutex_unlock(&conn->chan_lock);
3132
3133 return 0;
3134 }
3135
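/* Handle an Information Request by reporting our extended feature mask
 * or fixed channel map; any other info type is answered with
 * "not supported".
 */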
3136 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3137 {
3138 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3139 u16 type;
3140
3141 type = __le16_to_cpu(req->type);
3142
3143 BT_DBG("type 0x%4.4x", type);
3144
3145 if (type == L2CAP_IT_FEAT_MASK) {
3146 u8 buf[8];
3147 u32 feat_mask = l2cap_feat_mask;
3148 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3149 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3150 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3151 if (!disable_ertm)
3152 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3153 | L2CAP_FEAT_FCS;
3154 if (enable_hs)
3155 feat_mask |= L2CAP_FEAT_EXT_FLOW
3156 | L2CAP_FEAT_EXT_WINDOW;
3157
3158 put_unaligned_le32(feat_mask, rsp->data);
3159 l2cap_send_cmd(conn, cmd->ident,
3160 L2CAP_INFO_RSP, sizeof(buf), buf);
3161 } else if (type == L2CAP_IT_FIXED_CHAN) {
3162 u8 buf[12];
3163 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3164
3165 if (enable_hs)
3166 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3167 else
3168 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3169
3170 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3171 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3172 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3173 l2cap_send_cmd(conn, cmd->ident,
3174 L2CAP_INFO_RSP, sizeof(buf), buf);
3175 } else {
3176 struct l2cap_info_rsp rsp;
3177 rsp.type = cpu_to_le16(type);
3178 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3179 l2cap_send_cmd(conn, cmd->ident,
3180 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3181 }
3182
3183 return 0;
3184 }
3185
3186 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3187 {
3188 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3189 u16 type, result;
3190
3191 type = __le16_to_cpu(rsp->type);
3192 result = __le16_to_cpu(rsp->result);
3193
3194 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3195
3196 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3197 if (cmd->ident != conn->info_ident ||
3198 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3199 return 0;
3200
3201 cancel_delayed_work(&conn->info_timer);
3202
3203 if (result != L2CAP_IR_SUCCESS) {
3204 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3205 conn->info_ident = 0;
3206
3207 l2cap_conn_start(conn);
3208
3209 return 0;
3210 }
3211
3212 switch (type) {
3213 case L2CAP_IT_FEAT_MASK:
3214 conn->feat_mask = get_unaligned_le32(rsp->data);
3215
3216 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3217 struct l2cap_info_req req;
3218 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3219
3220 conn->info_ident = l2cap_get_ident(conn);
3221
3222 l2cap_send_cmd(conn, conn->info_ident,
3223 L2CAP_INFO_REQ, sizeof(req), &req);
3224 } else {
3225 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3226 conn->info_ident = 0;
3227
3228 l2cap_conn_start(conn);
3229 }
3230 break;
3231
3232 case L2CAP_IT_FIXED_CHAN:
3233 conn->fixed_chan_mask = rsp->data[0];
3234 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3235 conn->info_ident = 0;
3236
3237 l2cap_conn_start(conn);
3238 break;
3239 }
3240
3241 return 0;
3242 }
3243
3244 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3245 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3246 void *data)
3247 {
3248 struct l2cap_create_chan_req *req = data;
3249 struct l2cap_create_chan_rsp rsp;
3250 u16 psm, scid;
3251
3252 if (cmd_len != sizeof(*req))
3253 return -EPROTO;
3254
3255 if (!enable_hs)
3256 return -EINVAL;
3257
3258 psm = le16_to_cpu(req->psm);
3259 scid = le16_to_cpu(req->scid);
3260
3261 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3262
3263 /* Placeholder: Always reject */
3264 rsp.dcid = 0;
3265 rsp.scid = cpu_to_le16(scid);
3266 rsp.result = L2CAP_CR_NO_MEM;
3267 rsp.status = L2CAP_CS_NO_INFO;
3268
3269 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3270 sizeof(rsp), &rsp);
3271
3272 return 0;
3273 }
3274
3275 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3276 struct l2cap_cmd_hdr *cmd, void *data)
3277 {
3278 BT_DBG("conn %p", conn);
3279
3280 return l2cap_connect_rsp(conn, cmd, data);
3281 }
3282
3283 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3284 u16 icid, u16 result)
3285 {
3286 struct l2cap_move_chan_rsp rsp;
3287
3288 BT_DBG("icid %d, result %d", icid, result);
3289
3290 rsp.icid = cpu_to_le16(icid);
3291 rsp.result = cpu_to_le16(result);
3292
3293 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3294 }
3295
3296 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3297 struct l2cap_chan *chan, u16 icid, u16 result)
3298 {
3299 struct l2cap_move_chan_cfm cfm;
3300 u8 ident;
3301
3302 BT_DBG("icid %d, result %d", icid, result);
3303
3304 ident = l2cap_get_ident(conn);
3305 if (chan)
3306 chan->ident = ident;
3307
3308 cfm.icid = cpu_to_le16(icid);
3309 cfm.result = cpu_to_le16(result);
3310
3311 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3312 }
3313
3314 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3315 u16 icid)
3316 {
3317 struct l2cap_move_chan_cfm_rsp rsp;
3318
3319 BT_DBG("icid %d", icid);
3320
3321 rsp.icid = cpu_to_le16(icid);
3322 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3323 }
3324
3325 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3326 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3327 {
3328 struct l2cap_move_chan_req *req = data;
3329 u16 icid = 0;
3330 u16 result = L2CAP_MR_NOT_ALLOWED;
3331
3332 if (cmd_len != sizeof(*req))
3333 return -EPROTO;
3334
3335 icid = le16_to_cpu(req->icid);
3336
3337 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3338
3339 if (!enable_hs)
3340 return -EINVAL;
3341
3342 /* Placeholder: Always refuse */
3343 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3344
3345 return 0;
3346 }
3347
3348 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3349 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3350 {
3351 struct l2cap_move_chan_rsp *rsp = data;
3352 u16 icid, result;
3353
3354 if (cmd_len != sizeof(*rsp))
3355 return -EPROTO;
3356
3357 icid = le16_to_cpu(rsp->icid);
3358 result = le16_to_cpu(rsp->result);
3359
3360 BT_DBG("icid %d, result %d", icid, result);
3361
3362 /* Placeholder: Always unconfirmed */
3363 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3364
3365 return 0;
3366 }
3367
3368 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3369 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3370 {
3371 struct l2cap_move_chan_cfm *cfm = data;
3372 u16 icid, result;
3373
3374 if (cmd_len != sizeof(*cfm))
3375 return -EPROTO;
3376
3377 icid = le16_to_cpu(cfm->icid);
3378 result = le16_to_cpu(cfm->result);
3379
3380 BT_DBG("icid %d, result %d", icid, result);
3381
3382 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3383
3384 return 0;
3385 }
3386
3387 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3388 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3389 {
3390 struct l2cap_move_chan_cfm_rsp *rsp = data;
3391 u16 icid;
3392
3393 if (cmd_len != sizeof(*rsp))
3394 return -EPROTO;
3395
3396 icid = le16_to_cpu(rsp->icid);
3397
3398 BT_DBG("icid %d", icid);
3399
3400 return 0;
3401 }
3402
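/* Validate LE connection parameter update values: the interval must lie
 * in the 6-3200 range, the supervision timeout multiplier in 10-3200,
 * and the slave latency must be at most 499 and small enough for the
 * link to stay supervised. For example, with to_multiplier 100 and
 * max 80 the largest acceptable latency is (100 * 8 / 80) - 1 = 9.
 */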
3403 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3404 u16 to_multiplier)
3405 {
3406 u16 max_latency;
3407
3408 if (min > max || min < 6 || max > 3200)
3409 return -EINVAL;
3410
3411 if (to_multiplier < 10 || to_multiplier > 3200)
3412 return -EINVAL;
3413
3414 if (max >= to_multiplier * 8)
3415 return -EINVAL;
3416
3417 max_latency = (to_multiplier * 8 / max) - 1;
3418 if (latency > 499 || latency > max_latency)
3419 return -EINVAL;
3420
3421 return 0;
3422 }
3423
3424 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3425 struct l2cap_cmd_hdr *cmd, u8 *data)
3426 {
3427 struct hci_conn *hcon = conn->hcon;
3428 struct l2cap_conn_param_update_req *req;
3429 struct l2cap_conn_param_update_rsp rsp;
3430 u16 min, max, latency, to_multiplier, cmd_len;
3431 int err;
3432
3433 if (!(hcon->link_mode & HCI_LM_MASTER))
3434 return -EINVAL;
3435
3436 cmd_len = __le16_to_cpu(cmd->len);
3437 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3438 return -EPROTO;
3439
3440 req = (struct l2cap_conn_param_update_req *) data;
3441 min = __le16_to_cpu(req->min);
3442 max = __le16_to_cpu(req->max);
3443 latency = __le16_to_cpu(req->latency);
3444 to_multiplier = __le16_to_cpu(req->to_multiplier);
3445
3446 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3447 min, max, latency, to_multiplier);
3448
3449 memset(&rsp, 0, sizeof(rsp));
3450
3451 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3452 if (err)
3453 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3454 else
3455 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3456
3457 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3458 sizeof(rsp), &rsp);
3459
3460 if (!err)
3461 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3462
3463 return 0;
3464 }
3465
3466 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3467 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3468 {
3469 int err = 0;
3470
3471 switch (cmd->code) {
3472 case L2CAP_COMMAND_REJ:
3473 l2cap_command_rej(conn, cmd, data);
3474 break;
3475
3476 case L2CAP_CONN_REQ:
3477 err = l2cap_connect_req(conn, cmd, data);
3478 break;
3479
3480 case L2CAP_CONN_RSP:
3481 err = l2cap_connect_rsp(conn, cmd, data);
3482 break;
3483
3484 case L2CAP_CONF_REQ:
3485 err = l2cap_config_req(conn, cmd, cmd_len, data);
3486 break;
3487
3488 case L2CAP_CONF_RSP:
3489 err = l2cap_config_rsp(conn, cmd, data);
3490 break;
3491
3492 case L2CAP_DISCONN_REQ:
3493 err = l2cap_disconnect_req(conn, cmd, data);
3494 break;
3495
3496 case L2CAP_DISCONN_RSP:
3497 err = l2cap_disconnect_rsp(conn, cmd, data);
3498 break;
3499
3500 case L2CAP_ECHO_REQ:
3501 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3502 break;
3503
3504 case L2CAP_ECHO_RSP:
3505 break;
3506
3507 case L2CAP_INFO_REQ:
3508 err = l2cap_information_req(conn, cmd, data);
3509 break;
3510
3511 case L2CAP_INFO_RSP:
3512 err = l2cap_information_rsp(conn, cmd, data);
3513 break;
3514
3515 case L2CAP_CREATE_CHAN_REQ:
3516 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3517 break;
3518
3519 case L2CAP_CREATE_CHAN_RSP:
3520 err = l2cap_create_channel_rsp(conn, cmd, data);
3521 break;
3522
3523 case L2CAP_MOVE_CHAN_REQ:
3524 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3525 break;
3526
3527 case L2CAP_MOVE_CHAN_RSP:
3528 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3529 break;
3530
3531 case L2CAP_MOVE_CHAN_CFM:
3532 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3533 break;
3534
3535 case L2CAP_MOVE_CHAN_CFM_RSP:
3536 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3537 break;
3538
3539 default:
3540 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3541 err = -EINVAL;
3542 break;
3543 }
3544
3545 return err;
3546 }
3547
3548 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3549 struct l2cap_cmd_hdr *cmd, u8 *data)
3550 {
3551 switch (cmd->code) {
3552 case L2CAP_COMMAND_REJ:
3553 return 0;
3554
3555 case L2CAP_CONN_PARAM_UPDATE_REQ:
3556 return l2cap_conn_param_update_req(conn, cmd, data);
3557
3558 case L2CAP_CONN_PARAM_UPDATE_RSP:
3559 return 0;
3560
3561 default:
3562 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3563 return -EINVAL;
3564 }
3565 }
3566
3567 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3568 struct sk_buff *skb)
3569 {
3570 u8 *data = skb->data;
3571 int len = skb->len;
3572 struct l2cap_cmd_hdr cmd;
3573 int err;
3574
3575 l2cap_raw_recv(conn, skb);
3576
3577 while (len >= L2CAP_CMD_HDR_SIZE) {
3578 u16 cmd_len;
3579 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3580 data += L2CAP_CMD_HDR_SIZE;
3581 len -= L2CAP_CMD_HDR_SIZE;
3582
3583 cmd_len = le16_to_cpu(cmd.len);
3584
3585 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3586
3587 if (cmd_len > len || !cmd.ident) {
3588 BT_DBG("corrupted command");
3589 break;
3590 }
3591
3592 if (conn->hcon->type == LE_LINK)
3593 err = l2cap_le_sig_cmd(conn, &cmd, data);
3594 else
3595 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3596
3597 if (err) {
3598 struct l2cap_cmd_rej_unk rej;
3599
3600 BT_ERR("Wrong link type (%d)", err);
3601
3602 /* FIXME: Map err to a valid reason */
3603 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3604 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3605 }
3606
3607 data += cmd_len;
3608 len -= cmd_len;
3609 }
3610
3611 kfree_skb(skb);
3612 }
3613
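/* When CRC16 FCS is enabled on the channel, strip the FCS trailer from
 * the skb and compare it against the CRC computed over the L2CAP header
 * and payload; returns -EBADMSG on a mismatch.
 */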
3614 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3615 {
3616 u16 our_fcs, rcv_fcs;
3617 int hdr_size;
3618
3619 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3620 hdr_size = L2CAP_EXT_HDR_SIZE;
3621 else
3622 hdr_size = L2CAP_ENH_HDR_SIZE;
3623
3624 if (chan->fcs == L2CAP_FCS_CRC16) {
3625 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3626 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3627 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3628
3629 if (our_fcs != rcv_fcs)
3630 return -EBADMSG;
3631 }
3632 return 0;
3633 }
3634
3635 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3636 {
3637 u32 control = 0;
3638
3639 chan->frames_sent = 0;
3640
3641 control |= __set_reqseq(chan, chan->buffer_seq);
3642
3643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3645 l2cap_send_sframe(chan, control);
3646 set_bit(CONN_RNR_SENT, &chan->conn_state);
3647 }
3648
3649 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3650 l2cap_retransmit_frames(chan);
3651
3652 l2cap_ertm_send(chan);
3653
3654 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3655 chan->frames_sent == 0) {
3656 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3657 l2cap_send_sframe(chan, control);
3658 }
3659 }
3660
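/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by sequence offset from buffer_seq; a duplicate tx_seq
 * is rejected with -EINVAL.
 */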
3661 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3662 {
3663 struct sk_buff *next_skb;
3664 int tx_seq_offset, next_tx_seq_offset;
3665
3666 bt_cb(skb)->tx_seq = tx_seq;
3667 bt_cb(skb)->sar = sar;
3668
3669 next_skb = skb_peek(&chan->srej_q);
3670
3671 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3672
3673 while (next_skb) {
3674 if (bt_cb(next_skb)->tx_seq == tx_seq)
3675 return -EINVAL;
3676
3677 next_tx_seq_offset = __seq_offset(chan,
3678 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3679
3680 if (next_tx_seq_offset > tx_seq_offset) {
3681 __skb_queue_before(&chan->srej_q, next_skb, skb);
3682 return 0;
3683 }
3684
3685 if (skb_queue_is_last(&chan->srej_q, next_skb))
3686 next_skb = NULL;
3687 else
3688 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3689 }
3690
3691 __skb_queue_tail(&chan->srej_q, skb);
3692
3693 return 0;
3694 }
3695
3696 static void append_skb_frag(struct sk_buff *skb,
3697 struct sk_buff *new_frag, struct sk_buff **last_frag)
3698 {
3699 /* skb->len reflects data in skb as well as all fragments
3700 * skb->data_len reflects only data in fragments
3701 */
3702 if (!skb_has_frag_list(skb))
3703 skb_shinfo(skb)->frag_list = new_frag;
3704
3705 new_frag->next = NULL;
3706
3707 (*last_frag)->next = new_frag;
3708 *last_frag = new_frag;
3709
3710 skb->len += new_frag->len;
3711 skb->data_len += new_frag->len;
3712 skb->truesize += new_frag->truesize;
3713 }
3714
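/* Reassemble segmented SDUs according to the SAR bits of the control
 * field: unsegmented frames go straight to the recv callback, a start
 * frame records the announced SDU length, continuation and end frames
 * are appended to chan->sdu, and the complete SDU is delivered once its
 * length matches. Any error discards the partial SDU.
 */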
3715 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3716 {
3717 int err = -EINVAL;
3718
3719 switch (__get_ctrl_sar(chan, control)) {
3720 case L2CAP_SAR_UNSEGMENTED:
3721 if (chan->sdu)
3722 break;
3723
3724 err = chan->ops->recv(chan->data, skb);
3725 break;
3726
3727 case L2CAP_SAR_START:
3728 if (chan->sdu)
3729 break;
3730
3731 chan->sdu_len = get_unaligned_le16(skb->data);
3732 skb_pull(skb, L2CAP_SDULEN_SIZE);
3733
3734 if (chan->sdu_len > chan->imtu) {
3735 err = -EMSGSIZE;
3736 break;
3737 }
3738
3739 if (skb->len >= chan->sdu_len)
3740 break;
3741
3742 chan->sdu = skb;
3743 chan->sdu_last_frag = skb;
3744
3745 skb = NULL;
3746 err = 0;
3747 break;
3748
3749 case L2CAP_SAR_CONTINUE:
3750 if (!chan->sdu)
3751 break;
3752
3753 append_skb_frag(chan->sdu, skb,
3754 &chan->sdu_last_frag);
3755 skb = NULL;
3756
3757 if (chan->sdu->len >= chan->sdu_len)
3758 break;
3759
3760 err = 0;
3761 break;
3762
3763 case L2CAP_SAR_END:
3764 if (!chan->sdu)
3765 break;
3766
3767 append_skb_frag(chan->sdu, skb,
3768 &chan->sdu_last_frag);
3769 skb = NULL;
3770
3771 if (chan->sdu->len != chan->sdu_len)
3772 break;
3773
3774 err = chan->ops->recv(chan->data, chan->sdu);
3775
3776 if (!err) {
3777 /* Reassembly complete */
3778 chan->sdu = NULL;
3779 chan->sdu_last_frag = NULL;
3780 chan->sdu_len = 0;
3781 }
3782 break;
3783 }
3784
3785 if (err) {
3786 kfree_skb(skb);
3787 kfree_skb(chan->sdu);
3788 chan->sdu = NULL;
3789 chan->sdu_last_frag = NULL;
3790 chan->sdu_len = 0;
3791 }
3792
3793 return err;
3794 }
3795
3796 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3797 {
3798 BT_DBG("chan %p, Enter local busy", chan);
3799
3800 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3801
3802 __set_ack_timer(chan);
3803 }
3804
3805 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3806 {
3807 u32 control;
3808
3809 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3810 goto done;
3811
3812 control = __set_reqseq(chan, chan->buffer_seq);
3813 control |= __set_ctrl_poll(chan);
3814 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3815 l2cap_send_sframe(chan, control);
3816 chan->retry_count = 1;
3817
3818 __clear_retrans_timer(chan);
3819 __set_monitor_timer(chan);
3820
3821 set_bit(CONN_WAIT_F, &chan->conn_state);
3822
3823 done:
3824 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3825 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3826
3827 BT_DBG("chan %p, Exit local busy", chan);
3828 }
3829
3830 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3831 {
3832 if (chan->mode == L2CAP_MODE_ERTM) {
3833 if (busy)
3834 l2cap_ertm_enter_local_busy(chan);
3835 else
3836 l2cap_ertm_exit_local_busy(chan);
3837 }
3838 }
3839
3840 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3841 {
3842 struct sk_buff *skb;
3843 u32 control;
3844
3845 while ((skb = skb_peek(&chan->srej_q)) &&
3846 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3847 int err;
3848
3849 if (bt_cb(skb)->tx_seq != tx_seq)
3850 break;
3851
3852 skb = skb_dequeue(&chan->srej_q);
3853 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3854 err = l2cap_reassemble_sdu(chan, skb, control);
3855
3856 if (err < 0) {
3857 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3858 break;
3859 }
3860
3861 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3862 tx_seq = __next_seq(chan, tx_seq);
3863 }
3864 }
3865
3866 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3867 {
3868 struct srej_list *l, *tmp;
3869 u32 control;
3870
3871 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3872 if (l->tx_seq == tx_seq) {
3873 list_del(&l->list);
3874 kfree(l);
3875 return;
3876 }
3877 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3878 control |= __set_reqseq(chan, l->tx_seq);
3879 l2cap_send_sframe(chan, control);
3880 list_del(&l->list);
3881 list_add_tail(&l->list, &chan->srej_l);
3882 }
3883 }
3884
3885 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3886 {
3887 struct srej_list *new;
3888 u32 control;
3889
3890 while (tx_seq != chan->expected_tx_seq) {
3891 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3892 control |= __set_reqseq(chan, chan->expected_tx_seq);
3893 l2cap_send_sframe(chan, control);
3894
3895 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3896 if (!new)
3897 return -ENOMEM;
3898
3899 new->tx_seq = chan->expected_tx_seq;
3900
3901 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3902
3903 list_add_tail(&new->list, &chan->srej_l);
3904 }
3905
3906 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3907
3908 return 0;
3909 }
3910
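/* Process a received ERTM I-frame: acknowledge frames covered by its
 * req_seq, queue out-of-sequence frames for selective reject recovery,
 * and feed in-sequence frames to SDU reassembly. An acknowledgement is
 * scheduled roughly every (tx_win / 6) + 1 in-sequence frames.
 */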
3911 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3912 {
3913 u16 tx_seq = __get_txseq(chan, rx_control);
3914 u16 req_seq = __get_reqseq(chan, rx_control);
3915 u8 sar = __get_ctrl_sar(chan, rx_control);
3916 int tx_seq_offset, expected_tx_seq_offset;
3917 int num_to_ack = (chan->tx_win/6) + 1;
3918 int err = 0;
3919
3920 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3921 tx_seq, rx_control);
3922
3923 if (__is_ctrl_final(chan, rx_control) &&
3924 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3925 __clear_monitor_timer(chan);
3926 if (chan->unacked_frames > 0)
3927 __set_retrans_timer(chan);
3928 clear_bit(CONN_WAIT_F, &chan->conn_state);
3929 }
3930
3931 chan->expected_ack_seq = req_seq;
3932 l2cap_drop_acked_frames(chan);
3933
3934 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3935
3936 /* invalid tx_seq */
3937 if (tx_seq_offset >= chan->tx_win) {
3938 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3939 goto drop;
3940 }
3941
3942 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3943 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3944 l2cap_send_ack(chan);
3945 goto drop;
3946 }
3947
3948 if (tx_seq == chan->expected_tx_seq)
3949 goto expected;
3950
3951 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3952 struct srej_list *first;
3953
3954 first = list_first_entry(&chan->srej_l,
3955 struct srej_list, list);
3956 if (tx_seq == first->tx_seq) {
3957 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3958 l2cap_check_srej_gap(chan, tx_seq);
3959
3960 list_del(&first->list);
3961 kfree(first);
3962
3963 if (list_empty(&chan->srej_l)) {
3964 chan->buffer_seq = chan->buffer_seq_srej;
3965 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3966 l2cap_send_ack(chan);
3967 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3968 }
3969 } else {
3970 struct srej_list *l;
3971
3972 /* duplicated tx_seq */
3973 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3974 goto drop;
3975
3976 list_for_each_entry(l, &chan->srej_l, list) {
3977 if (l->tx_seq == tx_seq) {
3978 l2cap_resend_srejframe(chan, tx_seq);
3979 return 0;
3980 }
3981 }
3982
3983 err = l2cap_send_srejframe(chan, tx_seq);
3984 if (err < 0) {
3985 l2cap_send_disconn_req(chan->conn, chan, -err);
3986 return err;
3987 }
3988 }
3989 } else {
3990 expected_tx_seq_offset = __seq_offset(chan,
3991 chan->expected_tx_seq, chan->buffer_seq);
3992
3993 /* duplicated tx_seq */
3994 if (tx_seq_offset < expected_tx_seq_offset)
3995 goto drop;
3996
3997 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3998
3999 BT_DBG("chan %p, Enter SREJ", chan);
4000
4001 INIT_LIST_HEAD(&chan->srej_l);
4002 chan->buffer_seq_srej = chan->buffer_seq;
4003
4004 __skb_queue_head_init(&chan->srej_q);
4005 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4006
4007 /* Set P-bit only if there are some I-frames to ack. */
4008 if (__clear_ack_timer(chan))
4009 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4010
4011 err = l2cap_send_srejframe(chan, tx_seq);
4012 if (err < 0) {
4013 l2cap_send_disconn_req(chan->conn, chan, -err);
4014 return err;
4015 }
4016 }
4017 return 0;
4018
4019 expected:
4020 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4021
4022 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4023 bt_cb(skb)->tx_seq = tx_seq;
4024 bt_cb(skb)->sar = sar;
4025 __skb_queue_tail(&chan->srej_q, skb);
4026 return 0;
4027 }
4028
4029 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4030 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4031
4032 if (err < 0) {
4033 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4034 return err;
4035 }
4036
4037 if (__is_ctrl_final(chan, rx_control)) {
4038 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4039 l2cap_retransmit_frames(chan);
4040 }
4041
4042
4043 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4044 if (chan->num_acked == num_to_ack - 1)
4045 l2cap_send_ack(chan);
4046 else
4047 __set_ack_timer(chan);
4048
4049 return 0;
4050
4051 drop:
4052 kfree_skb(skb);
4053 return 0;
4054 }
4055
4056 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4057 {
4058 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4059 __get_reqseq(chan, rx_control), rx_control);
4060
4061 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4062 l2cap_drop_acked_frames(chan);
4063
4064 if (__is_ctrl_poll(chan, rx_control)) {
4065 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4066 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4067 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4068 (chan->unacked_frames > 0))
4069 __set_retrans_timer(chan);
4070
4071 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4072 l2cap_send_srejtail(chan);
4073 } else {
4074 l2cap_send_i_or_rr_or_rnr(chan);
4075 }
4076
4077 } else if (__is_ctrl_final(chan, rx_control)) {
4078 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4079
4080 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4081 l2cap_retransmit_frames(chan);
4082
4083 } else {
4084 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4085 (chan->unacked_frames > 0))
4086 __set_retrans_timer(chan);
4087
4088 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4089 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4090 l2cap_send_ack(chan);
4091 else
4092 l2cap_ertm_send(chan);
4093 }
4094 }
4095
4096 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4097 {
4098 u16 tx_seq = __get_reqseq(chan, rx_control);
4099
4100 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4101
4102 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4103
4104 chan->expected_ack_seq = tx_seq;
4105 l2cap_drop_acked_frames(chan);
4106
4107 if (__is_ctrl_final(chan, rx_control)) {
4108 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4109 l2cap_retransmit_frames(chan);
4110 } else {
4111 l2cap_retransmit_frames(chan);
4112
4113 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4114 set_bit(CONN_REJ_ACT, &chan->conn_state);
4115 }
4116 }
4117 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4118 {
4119 u16 tx_seq = __get_reqseq(chan, rx_control);
4120
4121 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4122
4123 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4124
4125 if (__is_ctrl_poll(chan, rx_control)) {
4126 chan->expected_ack_seq = tx_seq;
4127 l2cap_drop_acked_frames(chan);
4128
4129 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4130 l2cap_retransmit_one_frame(chan, tx_seq);
4131
4132 l2cap_ertm_send(chan);
4133
4134 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4135 chan->srej_save_reqseq = tx_seq;
4136 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4137 }
4138 } else if (__is_ctrl_final(chan, rx_control)) {
4139 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4140 chan->srej_save_reqseq == tx_seq)
4141 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4142 else
4143 l2cap_retransmit_one_frame(chan, tx_seq);
4144 } else {
4145 l2cap_retransmit_one_frame(chan, tx_seq);
4146 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4147 chan->srej_save_reqseq = tx_seq;
4148 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4149 }
4150 }
4151 }
4152
4153 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4154 {
4155 u16 tx_seq = __get_reqseq(chan, rx_control);
4156
4157 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4158
4159 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4160 chan->expected_ack_seq = tx_seq;
4161 l2cap_drop_acked_frames(chan);
4162
4163 if (__is_ctrl_poll(chan, rx_control))
4164 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4165
4166 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4167 __clear_retrans_timer(chan);
4168 if (__is_ctrl_poll(chan, rx_control))
4169 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4170 return;
4171 }
4172
4173 if (__is_ctrl_poll(chan, rx_control)) {
4174 l2cap_send_srejtail(chan);
4175 } else {
4176 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4177 l2cap_send_sframe(chan, rx_control);
4178 }
4179 }
4180
4181 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4182 {
4183 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4184
4185 if (__is_ctrl_final(chan, rx_control) &&
4186 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4187 __clear_monitor_timer(chan);
4188 if (chan->unacked_frames > 0)
4189 __set_retrans_timer(chan);
4190 clear_bit(CONN_WAIT_F, &chan->conn_state);
4191 }
4192
4193 switch (__get_ctrl_super(chan, rx_control)) {
4194 case L2CAP_SUPER_RR:
4195 l2cap_data_channel_rrframe(chan, rx_control);
4196 break;
4197
4198 case L2CAP_SUPER_REJ:
4199 l2cap_data_channel_rejframe(chan, rx_control);
4200 break;
4201
4202 case L2CAP_SUPER_SREJ:
4203 l2cap_data_channel_srejframe(chan, rx_control);
4204 break;
4205
4206 case L2CAP_SUPER_RNR:
4207 l2cap_data_channel_rnrframe(chan, rx_control);
4208 break;
4209 }
4210
4211 kfree_skb(skb);
4212 return 0;
4213 }
4214
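/* Entry point for frames received on an ERTM channel: verify the FCS,
 * validate req_seq and the payload length, then hand the frame to the
 * I-frame or S-frame handler.
 */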
4215 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4216 {
4217 u32 control;
4218 u16 req_seq;
4219 int len, next_tx_seq_offset, req_seq_offset;
4220
4221 control = __get_control(chan, skb->data);
4222 skb_pull(skb, __ctrl_size(chan));
4223 len = skb->len;
4224
4225 /*
4226 * We can just drop the corrupted I-frame here.
4227 * The receive state machine will treat it as a missing frame and
4228 * start the normal recovery procedure, requesting retransmission.
4229 */
4230 if (l2cap_check_fcs(chan, skb))
4231 goto drop;
4232
4233 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4234 len -= L2CAP_SDULEN_SIZE;
4235
4236 if (chan->fcs == L2CAP_FCS_CRC16)
4237 len -= L2CAP_FCS_SIZE;
4238
4239 if (len > chan->mps) {
4240 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4241 goto drop;
4242 }
4243
4244 req_seq = __get_reqseq(chan, control);
4245
4246 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4247
4248 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4249 chan->expected_ack_seq);
4250
4251 /* check for invalid req-seq */
4252 if (req_seq_offset > next_tx_seq_offset) {
4253 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4254 goto drop;
4255 }
4256
4257 if (!__is_sframe(chan, control)) {
4258 if (len < 0) {
4259 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4260 goto drop;
4261 }
4262
4263 l2cap_data_channel_iframe(chan, control, skb);
4264 } else {
4265 if (len != 0) {
4266 BT_ERR("%d", len);
4267 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4268 goto drop;
4269 }
4270
4271 l2cap_data_channel_sframe(chan, control, skb);
4272 }
4273
4274 return 0;
4275
4276 drop:
4277 kfree_skb(skb);
4278 return 0;
4279 }
4280
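/* Deliver an incoming data frame to the channel identified by the CID,
 * processing it according to the channel mode (basic, ERTM or
 * streaming); frames for unknown or unconnected channels are dropped.
 */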
4281 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4282 {
4283 struct l2cap_chan *chan;
4284 u32 control;
4285 u16 tx_seq;
4286 int len;
4287
4288 chan = l2cap_get_chan_by_scid(conn, cid);
4289 if (!chan) {
4290 BT_DBG("unknown cid 0x%4.4x", cid);
4291 /* Drop packet and return */
4292 kfree_skb(skb);
4293 return 0;
4294 }
4295
4296 l2cap_chan_lock(chan);
4297
4298 BT_DBG("chan %p, len %d", chan, skb->len);
4299
4300 if (chan->state != BT_CONNECTED)
4301 goto drop;
4302
4303 switch (chan->mode) {
4304 case L2CAP_MODE_BASIC:
4305 /* If the socket receive buffer overflows we drop data here,
4306 * which is *bad* because L2CAP has to be reliable.
4307 * But we don't have any other choice: L2CAP doesn't
4308 * provide a flow control mechanism. */
4309
4310 if (chan->imtu < skb->len)
4311 goto drop;
4312
4313 if (!chan->ops->recv(chan->data, skb))
4314 goto done;
4315 break;
4316
4317 case L2CAP_MODE_ERTM:
4318 l2cap_ertm_data_rcv(chan, skb);
4319
4320 goto done;
4321
4322 case L2CAP_MODE_STREAMING:
4323 control = __get_control(chan, skb->data);
4324 skb_pull(skb, __ctrl_size(chan));
4325 len = skb->len;
4326
4327 if (l2cap_check_fcs(chan, skb))
4328 goto drop;
4329
4330 if (__is_sar_start(chan, control))
4331 len -= L2CAP_SDULEN_SIZE;
4332
4333 if (chan->fcs == L2CAP_FCS_CRC16)
4334 len -= L2CAP_FCS_SIZE;
4335
4336 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4337 goto drop;
4338
4339 tx_seq = __get_txseq(chan, control);
4340
4341 if (chan->expected_tx_seq != tx_seq) {
4342 /* Frame(s) missing - must discard partial SDU */
4343 kfree_skb(chan->sdu);
4344 chan->sdu = NULL;
4345 chan->sdu_last_frag = NULL;
4346 chan->sdu_len = 0;
4347
4348 /* TODO: Notify userland of missing data */
4349 }
4350
4351 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4352
4353 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4354 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4355
4356 goto done;
4357
4358 default:
4359 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4360 break;
4361 }
4362
4363 drop:
4364 kfree_skb(skb);
4365
4366 done:
4367 l2cap_chan_unlock(chan);
4368
4369 return 0;
4370 }
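
/*
 * Illustrative sketch (hypothetical names): both the ERTM and the streaming
 * paths above call l2cap_check_fcs() before trusting a frame.  This assumes
 * the same CRC-16 as the kernel's crc16() helper (polynomial 0x8005 in
 * reflected form, seeded with 0) computed over the whole PDU except the
 * trailing two FCS octets, which are transmitted little-endian.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t crc16_reflected(uint16_t crc, const uint8_t *data, size_t len)
{
	while (len--) {
		crc ^= *data++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

/* Returns 0 when the trailing FCS matches the computed one, -1 otherwise. */
static int check_fcs(const uint8_t *pdu, size_t pdu_len)
{
	uint16_t computed, received;

	if (pdu_len < 2)
		return -1;

	computed = crc16_reflected(0, pdu, pdu_len - 2);
	received = (uint16_t)pdu[pdu_len - 2] | ((uint16_t)pdu[pdu_len - 1] << 8);

	return computed == received ? 0 : -1;
}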
4371
4372 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4373 {
4374 struct l2cap_chan *chan;
4375
4376 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4377 if (!chan)
4378 goto drop;
4379
4380 BT_DBG("chan %p, len %d", chan, skb->len);
4381
4382 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4383 goto drop;
4384
4385 if (chan->imtu < skb->len)
4386 goto drop;
4387
4388 if (!chan->ops->recv(chan->data, skb))
4389 return 0;
4390
4391 drop:
4392 kfree_skb(skb);
4393
4394 return 0;
4395 }
4396
4397 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4398 {
4399 struct l2cap_chan *chan;
4400
4401 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4402 if (!chan)
4403 goto drop;
4404
4405 BT_DBG("chan %p, len %d", chan, skb->len);
4406
4407 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4408 goto drop;
4409
4410 if (chan->imtu < skb->len)
4411 goto drop;
4412
4413 if (!chan->ops->recv(chan->data, skb))
4414 return 0;
4415
4416 drop:
4417 kfree_skb(skb);
4418
4419 return 0;
4420 }
4421
4422 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4423 {
4424 struct l2cap_hdr *lh = (void *) skb->data;
4425 u16 cid, len;
4426 __le16 psm;
4427
4428 skb_pull(skb, L2CAP_HDR_SIZE);
4429 cid = __le16_to_cpu(lh->cid);
4430 len = __le16_to_cpu(lh->len);
4431
4432 if (len != skb->len) {
4433 kfree_skb(skb);
4434 return;
4435 }
4436
4437 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4438
4439 switch (cid) {
4440 case L2CAP_CID_LE_SIGNALING:
4441 case L2CAP_CID_SIGNALING:
4442 l2cap_sig_channel(conn, skb);
4443 break;
4444
4445 case L2CAP_CID_CONN_LESS:
4446 psm = get_unaligned_le16(skb->data);
4447 skb_pull(skb, 2);
4448 l2cap_conless_channel(conn, psm, skb);
4449 break;
4450
4451 case L2CAP_CID_LE_DATA:
4452 l2cap_att_channel(conn, cid, skb);
4453 break;
4454
4455 case L2CAP_CID_SMP:
4456 if (smp_sig_channel(conn, skb))
4457 l2cap_conn_del(conn->hcon, EACCES);
4458 break;
4459
4460 default:
4461 l2cap_data_channel(conn, cid, skb);
4462 break;
4463 }
4464 }
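
/*
 * Illustrative sketch (hypothetical names, user-space C): l2cap_recv_frame()
 * above demultiplexes on the 4-byte Basic L2CAP header -- a little-endian
 * payload length followed by a little-endian channel ID.  For the
 * connectionless channel the first two payload octets are additionally the
 * little-endian PSM, mirroring the get_unaligned_le16() pull above.
 */
#include <stddef.h>
#include <stdint.h>

#define BASIC_HDR_SIZE 4

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Returns 0 and fills the out-parameters on success, -1 on a malformed frame. */
static int parse_basic_hdr(const uint8_t *frame, size_t frame_len,
			   uint16_t *cid, const uint8_t **payload,
			   size_t *payload_len)
{
	uint16_t len;

	if (frame_len < BASIC_HDR_SIZE)
		return -1;

	len = get_le16(frame);
	*cid = get_le16(frame + 2);

	/* As in l2cap_recv_frame(), the advertised length must match. */
	if (len != frame_len - BASIC_HDR_SIZE)
		return -1;

	*payload = frame + BASIC_HDR_SIZE;
	*payload_len = len;
	return 0;
}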
4465
4466 /* ---- L2CAP interface with lower layer (HCI) ---- */
4467
4468 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4469 {
4470 int exact = 0, lm1 = 0, lm2 = 0;
4471 struct l2cap_chan *c;
4472
4473 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4474
4475 /* Find listening sockets and check their link_mode */
4476 read_lock(&chan_list_lock);
4477 list_for_each_entry(c, &chan_list, global_l) {
4478 struct sock *sk = c->sk;
4479
4480 if (c->state != BT_LISTEN)
4481 continue;
4482
4483 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4484 lm1 |= HCI_LM_ACCEPT;
4485 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4486 lm1 |= HCI_LM_MASTER;
4487 exact++;
4488 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4489 lm2 |= HCI_LM_ACCEPT;
4490 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4491 lm2 |= HCI_LM_MASTER;
4492 }
4493 }
4494 read_unlock(&chan_list_lock);
4495
4496 return exact ? lm1 : lm2;
4497 }
4498
4499 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4500 {
4501 struct l2cap_conn *conn;
4502
4503 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4504
4505 if (!status) {
4506 conn = l2cap_conn_add(hcon, status);
4507 if (conn)
4508 l2cap_conn_ready(conn);
4509 } else
4510 l2cap_conn_del(hcon, bt_to_errno(status));
4511
4512 return 0;
4513 }
4514
4515 int l2cap_disconn_ind(struct hci_conn *hcon)
4516 {
4517 struct l2cap_conn *conn = hcon->l2cap_data;
4518
4519 BT_DBG("hcon %p", hcon);
4520
4521 if (!conn)
4522 return HCI_ERROR_REMOTE_USER_TERM;
4523 return conn->disc_reason;
4524 }
4525
4526 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4527 {
4528 BT_DBG("hcon %p reason %d", hcon, reason);
4529
4530 l2cap_conn_del(hcon, bt_to_errno(reason));
4531 return 0;
4532 }
4533
4534 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4535 {
4536 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4537 return;
4538
4539 if (encrypt == 0x00) {
4540 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4541 __clear_chan_timer(chan);
4542 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4543 } else if (chan->sec_level == BT_SECURITY_HIGH)
4544 l2cap_chan_close(chan, ECONNREFUSED);
4545 } else {
4546 if (chan->sec_level == BT_SECURITY_MEDIUM)
4547 __clear_chan_timer(chan);
4548 }
4549 }
4550
4551 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4552 {
4553 struct l2cap_conn *conn = hcon->l2cap_data;
4554 struct l2cap_chan *chan;
4555
4556 if (!conn)
4557 return 0;
4558
4559 BT_DBG("conn %p", conn);
4560
4561 if (hcon->type == LE_LINK) {
4562 smp_distribute_keys(conn, 0);
4563 cancel_delayed_work(&conn->security_timer);
4564 }
4565
4566 mutex_lock(&conn->chan_lock);
4567
4568 list_for_each_entry(chan, &conn->chan_l, list) {
4569 l2cap_chan_lock(chan);
4570
4571 BT_DBG("chan->scid 0x%4.4x", chan->scid);
4572
4573 if (chan->scid == L2CAP_CID_LE_DATA) {
4574 if (!status && encrypt) {
4575 chan->sec_level = hcon->sec_level;
4576 l2cap_chan_ready(chan);
4577 }
4578
4579 l2cap_chan_unlock(chan);
4580 continue;
4581 }
4582
4583 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4584 l2cap_chan_unlock(chan);
4585 continue;
4586 }
4587
4588 if (!status && (chan->state == BT_CONNECTED ||
4589 chan->state == BT_CONFIG)) {
4590 l2cap_check_encryption(chan, encrypt);
4591 l2cap_chan_unlock(chan);
4592 continue;
4593 }
4594
4595 if (chan->state == BT_CONNECT) {
4596 if (!status) {
4597 l2cap_send_conn_req(chan);
4598 } else {
4599 __clear_chan_timer(chan);
4600 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4601 }
4602 } else if (chan->state == BT_CONNECT2) {
4603 struct sock *sk = chan->sk;
4604 struct l2cap_conn_rsp rsp;
4605 __u16 res, stat;
4606
4607 lock_sock(sk);
4608
4609 if (!status) {
4610 if (bt_sk(sk)->defer_setup) {
4611 struct sock *parent = bt_sk(sk)->parent;
4612 res = L2CAP_CR_PEND;
4613 stat = L2CAP_CS_AUTHOR_PEND;
4614 if (parent)
4615 parent->sk_data_ready(parent, 0);
4616 } else {
4617 __l2cap_state_change(chan, BT_CONFIG);
4618 res = L2CAP_CR_SUCCESS;
4619 stat = L2CAP_CS_NO_INFO;
4620 }
4621 } else {
4622 __l2cap_state_change(chan, BT_DISCONN);
4623 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4624 res = L2CAP_CR_SEC_BLOCK;
4625 stat = L2CAP_CS_NO_INFO;
4626 }
4627
4628 release_sock(sk);
4629
4630 rsp.scid = cpu_to_le16(chan->dcid);
4631 rsp.dcid = cpu_to_le16(chan->scid);
4632 rsp.result = cpu_to_le16(res);
4633 rsp.status = cpu_to_le16(stat);
4634 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4635 sizeof(rsp), &rsp);
4636 }
4637
4638 l2cap_chan_unlock(chan);
4639 }
4640
4641 mutex_unlock(&conn->chan_lock);
4642
4643 return 0;
4644 }
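
/*
 * Illustrative sketch (hypothetical helper): the connection response built at
 * the end of l2cap_security_cfm() goes on the wire as four little-endian
 * 16-bit fields -- DCID, SCID, Result, Status -- which is why every field is
 * passed through cpu_to_le16() above.  The swap (rsp.dcid = chan->scid) is
 * deliberate: the response's CIDs are defined from the responder's point of
 * view.
 */
#include <stdint.h>

static void pack_conn_rsp(uint8_t out[8], uint16_t dcid, uint16_t scid,
			  uint16_t result, uint16_t status)
{
	const uint16_t fields[4] = { dcid, scid, result, status };
	int i;

	for (i = 0; i < 4; i++) {
		out[2 * i]     = (uint8_t)(fields[i] & 0xff);
		out[2 * i + 1] = (uint8_t)(fields[i] >> 8);
	}
}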
4645
4646 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4647 {
4648 struct l2cap_conn *conn = hcon->l2cap_data;
4649
4650 if (!conn)
4651 conn = l2cap_conn_add(hcon, 0);
4652
4653 if (!conn)
4654 goto drop;
4655
4656 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4657
4658 if (!(flags & ACL_CONT)) {
4659 struct l2cap_hdr *hdr;
4660 struct l2cap_chan *chan;
4661 u16 cid;
4662 int len;
4663
4664 if (conn->rx_len) {
4665 BT_ERR("Unexpected start frame (len %d)", skb->len);
4666 kfree_skb(conn->rx_skb);
4667 conn->rx_skb = NULL;
4668 conn->rx_len = 0;
4669 l2cap_conn_unreliable(conn, ECOMM);
4670 }
4671
4672 /* A start fragment always begins with the Basic L2CAP header */
4673 if (skb->len < L2CAP_HDR_SIZE) {
4674 BT_ERR("Frame is too short (len %d)", skb->len);
4675 l2cap_conn_unreliable(conn, ECOMM);
4676 goto drop;
4677 }
4678
4679 hdr = (struct l2cap_hdr *) skb->data;
4680 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4681 cid = __le16_to_cpu(hdr->cid);
4682
4683 if (len == skb->len) {
4684 /* Complete frame received */
4685 l2cap_recv_frame(conn, skb);
4686 return 0;
4687 }
4688
4689 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4690
4691 if (skb->len > len) {
4692 BT_ERR("Frame is too long (len %d, expected len %d)",
4693 skb->len, len);
4694 l2cap_conn_unreliable(conn, ECOMM);
4695 goto drop;
4696 }
4697
4698 chan = l2cap_get_chan_by_scid(conn, cid);
4699
4700 if (chan && chan->sk) {
4701 struct sock *sk = chan->sk;
4702 lock_sock(sk);
4703
4704 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4705 BT_ERR("Frame exceeding recv MTU (len %d, "
4706 "MTU %d)", len,
4707 chan->imtu);
4708 release_sock(sk);
4709 l2cap_conn_unreliable(conn, ECOMM);
4710 goto drop;
4711 }
4712 release_sock(sk);
4713 }
4714
4715 /* Allocate skb for the complete frame (with header) */
4716 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4717 if (!conn->rx_skb)
4718 goto drop;
4719
4720 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4721 skb->len);
4722 conn->rx_len = len - skb->len;
4723 } else {
4724 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4725
4726 if (!conn->rx_len) {
4727 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4728 l2cap_conn_unreliable(conn, ECOMM);
4729 goto drop;
4730 }
4731
4732 if (skb->len > conn->rx_len) {
4733 BT_ERR("Fragment is too long (len %d, expected %d)",
4734 skb->len, conn->rx_len);
4735 kfree_skb(conn->rx_skb);
4736 conn->rx_skb = NULL;
4737 conn->rx_len = 0;
4738 l2cap_conn_unreliable(conn, ECOMM);
4739 goto drop;
4740 }
4741
4742 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4743 skb->len);
4744 conn->rx_len -= skb->len;
4745
4746 if (!conn->rx_len) {
4747 /* Complete frame received */
4748 l2cap_recv_frame(conn, conn->rx_skb);
4749 conn->rx_skb = NULL;
4750 }
4751 }
4752
4753 drop:
4754 kfree_skb(skb);
4755 return 0;
4756 }
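
/*
 * Illustrative sketch (hypothetical types and names): l2cap_recv_acldata()
 * above keeps one partially received L2CAP frame per connection and rebuilds
 * it from an ACL start fragment plus zero or more continuation fragments.  A
 * stripped-down user-space equivalent of that state machine (the real code
 * resets and keeps going in some of the cases this sketch simply rejects):
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct reasm {
	uint8_t *buf;		/* frame being rebuilt, NULL when idle          */
	size_t   have;		/* bytes collected so far                       */
	size_t   expected;	/* total frame length (basic header + payload)  */
};

/* Stand-in for l2cap_recv_frame(): consume one complete frame. */
static void deliver(const uint8_t *frame, size_t len)
{
	(void)frame;
	(void)len;
}

/* Returns 0 on success, -1 on a protocol error (caller resets the link). */
static int rx_fragment(struct reasm *r, int is_start,
		       const uint8_t *frag, size_t frag_len)
{
	if (is_start) {
		size_t total;

		if (r->buf || frag_len < 4)
			return -1;	/* unexpected start / too short */

		/* Basic header: little-endian payload length, then CID. */
		total = 4 + ((size_t)frag[0] | ((size_t)frag[1] << 8));
		if (frag_len > total)
			return -1;	/* longer than advertised */

		if (frag_len == total) {
			deliver(frag, frag_len);	/* complete in one go */
			return 0;
		}

		r->buf = malloc(total);
		if (!r->buf)
			return -1;
		memcpy(r->buf, frag, frag_len);
		r->have = frag_len;
		r->expected = total;
	} else {
		if (!r->buf || r->have + frag_len > r->expected)
			return -1;	/* unexpected or oversized continuation */

		memcpy(r->buf + r->have, frag, frag_len);
		r->have += frag_len;

		if (r->have == r->expected) {
			deliver(r->buf, r->expected);
			free(r->buf);
			r->buf = NULL;
			r->have = 0;
			r->expected = 0;
		}
	}
	return 0;
}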
4757
4758 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4759 {
4760 struct l2cap_chan *c;
4761
4762 read_lock(&chan_list_lock);
4763
4764 list_for_each_entry(c, &chan_list, global_l) {
4765 struct sock *sk = c->sk;
4766
4767 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4768 batostr(&bt_sk(sk)->src),
4769 batostr(&bt_sk(sk)->dst),
4770 c->state, __le16_to_cpu(c->psm),
4771 c->scid, c->dcid, c->imtu, c->omtu,
4772 c->sec_level, c->mode);
4773 }
4774
4775 read_unlock(&chan_list_lock);
4776
4777 return 0;
4778 }
4779
4780 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4781 {
4782 return single_open(file, l2cap_debugfs_show, inode->i_private);
4783 }
4784
4785 static const struct file_operations l2cap_debugfs_fops = {
4786 .open = l2cap_debugfs_open,
4787 .read = seq_read,
4788 .llseek = seq_lseek,
4789 .release = single_release,
4790 };
4791
4792 static struct dentry *l2cap_debugfs;
4793
4794 int __init l2cap_init(void)
4795 {
4796 int err;
4797
4798 err = l2cap_init_sockets();
4799 if (err < 0)
4800 return err;
4801
4802 if (bt_debugfs) {
4803 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4804 bt_debugfs, NULL, &l2cap_debugfs_fops);
4805 if (!l2cap_debugfs)
4806 BT_ERR("Failed to create L2CAP debug file");
4807 }
4808
4809 return 0;
4810 }
4811
4812 void l2cap_exit(void)
4813 {
4814 debugfs_remove(l2cap_debugfs);
4815 l2cap_cleanup_sockets();
4816 }
4817
4818 module_param(disable_ertm, bool, 0644);
4819 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");