]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - net/bluetooth/l2cap_core.c
Merge branch 'rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild
[mirror_ubuntu-eoan-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42
43 bool disable_ertm;
44
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 u16 cid)
65 {
66 struct l2cap_chan *c;
67
68 list_for_each_entry(c, &conn->chan_l, list) {
69 if (c->dcid == cid)
70 return c;
71 }
72 return NULL;
73 }
74
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 u16 cid)
77 {
78 struct l2cap_chan *c;
79
80 list_for_each_entry(c, &conn->chan_l, list) {
81 if (c->scid == cid)
82 return c;
83 }
84 return NULL;
85 }
86
/* Find channel with given SCID.
 * Returns the channel locked (l2cap_chan_lock held); caller must unlock.
 *
 * NOTE(review): the channel is returned locked but without taking an
 * extra reference, so nothing visible here prevents the final
 * l2cap_chan_put() from freeing it once chan_lock is dropped.  Mainline
 * later added an l2cap_chan_hold() at this point (with matching puts in
 * callers) — confirm whether this tree needs the same fix. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
102
/* Find channel with given DCID.
 * Returns the channel locked (l2cap_chan_lock held); caller must unlock.
 *
 * NOTE(review): same refcount caveat as l2cap_get_chan_by_scid() — the
 * channel is returned without an extra hold; verify callers cannot race
 * with the final l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
119
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 u8 ident)
122 {
123 struct l2cap_chan *c;
124
125 list_for_each_entry(c, &conn->chan_l, list) {
126 if (c->ident == ident)
127 return c;
128 }
129 return NULL;
130 }
131
/* Find channel waiting on the given signalling identifier.
 * Returns the channel locked; caller must unlock.  Same refcount
 * caveat as l2cap_get_chan_by_scid() above. */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
145
/* Find a channel in the global list bound to the given source PSM and
 * local address.  Callers hold chan_list_lock (see l2cap_add_psm).
 * Note: compares against c->sport (the bound source PSM). */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
156
/* Bind @chan to @psm on local address @src.  If @psm is zero, allocate
 * a free dynamic PSM in the 0x1001-0x10ff range (odd values only, per
 * the PSM encoding rule).  Returns 0 on success, -EADDRINUSE if the
 * requested PSM is taken, -EINVAL if no dynamic PSM is free. */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* Explicit PSM requested: reject if already bound on this addr */
	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Scan odd dynamic PSMs for a free slot */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
189
/* Bind @chan to a fixed source CID.  Always succeeds (returns 0);
 * the write lock only serializes against global-list readers. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
200
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 u16 cid = L2CAP_CID_DYN_START;
204
205 for (; cid < L2CAP_CID_DYN_END; cid++) {
206 if (!__l2cap_get_chan_by_scid(conn, cid))
207 return cid;
208 }
209
210 return 0;
211 }
212
/* Transition @chan to @state and notify the channel ops.
 * Lockless variant: caller holds the socket lock (see
 * l2cap_state_change). */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
221
/* Transition @chan to @state, taking the socket lock around the
 * update. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
230
/* Record @err on the channel's socket.  Caller holds the socket lock. */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
237
/* Record @err on the channel's socket, taking the socket lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
246
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (monitor supersedes retrans) or no retransmission
 * timeout is configured. */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255
/* Arm the ERTM monitor timer; the retransmission timer is cancelled
 * first since only one of the two runs at a time. */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264
/* Linear search of an skb queue for the frame with ERTM TxSeq == @seq.
 * Returns the skb (still queued) or NULL. */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
277
278 /* ---- L2CAP sequence number lists ---- */
279
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
286 * allocs or frees.
287 */
288
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 size_t alloc_size, i;
292
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
296 */
297 alloc_size = roundup_pow_of_two(size);
298
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 if (!seq_list->list)
301 return -ENOMEM;
302
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308
309 return 0;
310 }
311
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316
/* Test list membership in O(1): a slot that is not CLEAR holds either
 * the next sequence number or the TAIL sentinel, so the entry is in
 * the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323
/* Remove @seq from the list and return it, or return CLEAR if the
 * list is empty or @seq is not linked.  Removing the head is O(1);
 * removing an interior entry walks the singly-linked chain to find
 * the predecessor. */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* The old head was also the tail: list is now empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			/* Hit the tail sentinel: @seq is not in the list */
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357
/* Pop and return the head of the list (CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363
/* Empty the list, resetting every slot plus head/tail to CLEAR.
 * No-op when the list is already empty. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
377
/* Append @seq at the tail in O(1).  Duplicate appends are silently
 * ignored (the slot is already non-CLEAR). */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Empty list: @seq becomes the head; otherwise link it after
	 * the old tail. */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler for the per-channel timer: closes the channel
 * with an error chosen from its current state.  Lock order is
 * conn->chan_lock then the channel lock; the ops->close callback runs
 * after the channel lock is dropped but still under chan_lock.
 * Drops the reference taken when the timer was armed. */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Balance the hold taken when the chan_timer was set */
	l2cap_chan_put(chan);
}
425
/* Allocate and initialize a new channel, link it into the global
 * channel list, and return it with one reference held (kref_init).
 * Returns NULL on allocation failure.
 *
 * NOTE(review): GFP_ATOMIC is used here although nothing visible in
 * this function requires atomic context — confirm callers; mainline
 * uses GFP_KERNEL for this allocation. */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
453
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked by the final l2cap_chan_put(). */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473
/* Drop a reference; the last put frees the channel via
 * l2cap_chan_destroy(). */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480
/* Reset negotiable channel parameters (FCS, ERTM window/retry limits,
 * security level) to their spec defaults and force active mode. */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
492
/* Attach @chan to @conn: assign source/destination CIDs and MTUs by
 * channel type, apply best-effort QoS defaults, take a channel ref and
 * an hci_conn ref, and add the channel to the connection's list.
 * Caller holds conn->chan_lock (see l2cap_chan_add). */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			/* ATT uses its fixed CID on both ends */
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) extended flow spec parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped again in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
552
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
559
/* Detach @chan from its connection and tear it down with error @err:
 * cancel the channel timer, unlink from the connection list, drop the
 * references taken in __l2cap_chan_add(), disconnect any AMP logical
 * link, notify ops->teardown, and (once configuration completed) flush
 * ERTM/streaming queues and free the sequence lists.
 * Caller holds conn->chan_lock and the channel lock. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Balances the hold from __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* A2MP fixed channels do not pin the ACL link */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Config never completed: ERTM queues were never set up */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
619
/* Close @chan with @reason, driving the state-appropriate shutdown:
 * established BR/EDR channels send a Disconnect Request and wait under
 * timer; half-open incoming channels (BT_CONNECT2) reject with a
 * Connect Response first; other states are deleted or torn down
 * immediately.  Caller holds the channel lock. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Graceful disconnect: request + wait for rsp */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Deferred-setup sockets report a security
			 * block, everything else a bad PSM */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
676
677 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
678 {
679 if (chan->chan_type == L2CAP_CHAN_RAW) {
680 switch (chan->sec_level) {
681 case BT_SECURITY_HIGH:
682 return HCI_AT_DEDICATED_BONDING_MITM;
683 case BT_SECURITY_MEDIUM:
684 return HCI_AT_DEDICATED_BONDING;
685 default:
686 return HCI_AT_NO_BONDING;
687 }
688 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
689 if (chan->sec_level == BT_SECURITY_LOW)
690 chan->sec_level = BT_SECURITY_SDP;
691
692 if (chan->sec_level == BT_SECURITY_HIGH)
693 return HCI_AT_NO_BONDING_MITM;
694 else
695 return HCI_AT_NO_BONDING;
696 } else {
697 switch (chan->sec_level) {
698 case BT_SECURITY_HIGH:
699 return HCI_AT_GENERAL_BONDING_MITM;
700 case BT_SECURITY_MEDIUM:
701 return HCI_AT_GENERAL_BONDING;
702 default:
703 return HCI_AT_NO_BONDING;
704 }
705 }
706 }
707
/* Service level security: ask the HCI layer to enforce the channel's
 * security level / auth requirement on the underlying link.  Returns
 * the hci_conn_security() result. */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
718
/* Allocate the next signalling command identifier for @conn,
 * cycling through 1..128 under conn->lock. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
740
/* Build and transmit an L2CAP signalling command on @conn's ACL link.
 * Silently drops the command if the skb cannot be built.  Signalling
 * traffic is sent non-flushable (when supported) at maximum HCI
 * priority. */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
762
763 static bool __chan_is_moving(struct l2cap_chan *chan)
764 {
765 return chan->move_state != L2CAP_MOVE_STABLE &&
766 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
767 }
768
/* Transmit @skb on @chan: prefer the AMP (high-speed) channel when one
 * is attached and the channel is not mid-move, otherwise send on the
 * BR/EDR ACL link with flushability per channel flags.  Consumes the
 * skb (frees it if an AMP link is expected but missing). */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
795
796 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
797 {
798 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
799 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
800
801 if (enh & L2CAP_CTRL_FRAME_TYPE) {
802 /* S-Frame */
803 control->sframe = 1;
804 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
805 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
806
807 control->sar = 0;
808 control->txseq = 0;
809 } else {
810 /* I-Frame */
811 control->sframe = 0;
812 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
813 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
814
815 control->poll = 0;
816 control->super = 0;
817 }
818 }
819
820 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
821 {
822 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
823 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
824
825 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
826 /* S-Frame */
827 control->sframe = 1;
828 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
829 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
830
831 control->sar = 0;
832 control->txseq = 0;
833 } else {
834 /* I-Frame */
835 control->sframe = 0;
836 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
837 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
838
839 control->poll = 0;
840 control->super = 0;
841 }
842 }
843
/* Decode the control field at the front of @skb into the skb's
 * control block and pull it off the data, choosing the extended or
 * enhanced format from the channel's EXT_CTRL flag. */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
857
858 static u32 __pack_extended_control(struct l2cap_ctrl *control)
859 {
860 u32 packed;
861
862 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
863 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
864
865 if (control->sframe) {
866 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
867 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
868 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
869 } else {
870 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
871 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
872 }
873
874 return packed;
875 }
876
877 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
878 {
879 u16 packed;
880
881 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
882 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
883
884 if (control->sframe) {
885 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
886 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
887 packed |= L2CAP_CTRL_FRAME_TYPE;
888 } else {
889 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
890 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
891 }
892
893 return packed;
894 }
895
/* Write @control into @skb's control field slot (just after the basic
 * L2CAP header), using the extended or enhanced encoding per the
 * channel's EXT_CTRL flag. */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
908
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
910 {
911 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
912 return L2CAP_EXT_HDR_SIZE;
913 else
914 return L2CAP_ENH_HDR_SIZE;
915 }
916
/* Build an S-frame PDU carrying the already-packed @control field:
 * basic header + control (+ FCS when CRC16 is negotiated).  Returns
 * the skb at max HCI priority, or ERR_PTR(-ENOMEM). */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers header + control */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
949
/* Send the supervisory frame described by @control, updating ERTM
 * bookkeeping on the way: piggy-back a pending F-bit, track RNR-sent
 * state, and treat any non-SREJ S-frame as an acknowledgment (records
 * last_acked_seq and cancels the ack timer).  No-op for I-frames or
 * while an AMP channel move is in progress. */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit rides on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
990
/* Send an RR (ready) or RNR (not ready) S-frame acknowledging up to
 * buffer_seq; RNR is chosen when the local side is busy.  @poll sets
 * the P-bit. */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1009
/* True when no Connect Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1014
1015 static bool __amp_capable(struct l2cap_chan *chan)
1016 {
1017 struct l2cap_conn *conn = chan->conn;
1018
1019 if (enable_hs &&
1020 hci_amp_capable() &&
1021 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1022 conn->fixed_chan_mask & L2CAP_FC_A2MP)
1023 return true;
1024 else
1025 return false;
1026 }
1027
/* Validate extended flow spec parameters for @chan.
 * Currently a stub that accepts everything. */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1033
/* Send an L2CAP Connect Request for @chan's PSM/SCID and mark a
 * connect as pending (cleared when the response arrives). */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1048
/* Send an L2CAP Create Channel Request targeting AMP controller
 * @amp_id (the AMP variant of a Connect Request). */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1061
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * mark queued frames for retransmission, clear REJ/SREJ state and
 * queues, and park TX/RX in the move states.  No-op for non-ERTM
 * channels. */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset retry counts on already-sent frames; the first frame
	 * with retries == 0 ends the sent portion of the queue. */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1096
/* Finish an AMP channel move: return to the stable move state and,
 * for ERTM, resynchronize via poll — the initiator sends an explicit
 * poll and waits for F=1, the responder waits for P=1. */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1118
/* Mark the channel fully connected: clear all configuration flags
 * (including CONF_NOT_COMPLETE set at creation), stop the channel
 * timer, and notify ops->ready. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1129
/* Kick off channel establishment: run AMP discovery first when the
 * channel may use a high-speed controller, otherwise go straight to a
 * BR/EDR Connect Request. */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1139
/* Drive connection setup for @chan.  LE links are ready immediately.
 * On BR/EDR, if the feature-mask exchange has completed and security
 * passes (with no connect already pending), start the connection;
 * if the exchange has not been started yet, send the Information
 * Request and arm the info timer instead. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in flight: wait for it to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1170
1171 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1172 {
1173 u32 local_feat_mask = l2cap_feat_mask;
1174 if (!disable_ertm)
1175 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1176
1177 switch (mode) {
1178 case L2CAP_MODE_ERTM:
1179 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1180 case L2CAP_MODE_STREAMING:
1181 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1182 default:
1183 return 0x00;
1184 }
1185 }
1186
/* Initiate a disconnect of @chan: stop ERTM timers, send an L2CAP
 * Disconnect Request, and move the channel to BT_DISCONN with @err set
 * as the socket error.  A2MP fixed channels have no signalling exchange
 * and are only marked disconnected.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		/* No further retransmissions or acks while disconnecting */
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State and error changes happen under the socket lock */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1217
1218 /* ---- L2CAP connections ---- */
/* Advance the connection state machine of every channel on @conn, once
 * the feature-mask exchange has completed: outgoing channels
 * (BT_CONNECT) get their Connect Request sent, incoming channels
 * (BT_CONNECT2) get their pending Connect Response and, on success,
 * the first Configure Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the remote
			 * does not support and may not fall back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Defer to userspace accept() */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Security procedure still pending */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response that hasn't already
			 * been configured starts configuration.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1299
/* Find a channel with the given scid and source/destination bdaddr.
 * Returns the closest match; the returned channel is not locked.
 */
1303 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1304 bdaddr_t *src,
1305 bdaddr_t *dst)
1306 {
1307 struct l2cap_chan *c, *c1 = NULL;
1308
1309 read_lock(&chan_list_lock);
1310
1311 list_for_each_entry(c, &chan_list, global_l) {
1312 struct sock *sk = c->sk;
1313
1314 if (state && c->state != state)
1315 continue;
1316
1317 if (c->scid == cid) {
1318 int src_match, dst_match;
1319 int src_any, dst_any;
1320
1321 /* Exact match. */
1322 src_match = !bacmp(&bt_sk(sk)->src, src);
1323 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1324 if (src_match && dst_match) {
1325 read_unlock(&chan_list_lock);
1326 return c;
1327 }
1328
1329 /* Closest match */
1330 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1331 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1332 if ((src_match && dst_any) || (src_any && dst_match) ||
1333 (src_any && dst_any))
1334 c1 = c;
1335 }
1336 }
1337
1338 read_unlock(&chan_list_lock);
1339
1340 return c1;
1341 }
1342
/* Handle a newly-ready LE link: if a listening ATT channel matches the
 * connection's addresses, create a child channel for the ATT fixed CID
 * and attach it to @conn.
 *
 * Called from l2cap_conn_ready() with conn->chan_lock held, which
 * __l2cap_get_chan_by_dcid() and __l2cap_chan_add() rely on.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* The child inherits the link's addresses */
	bacpy(&bt_sk(chan->sk)->src, conn->src);
	bacpy(&bt_sk(chan->sk)->dst, conn->dst);

	__l2cap_chan_add(conn, chan);

clean:
	release_sock(parent);
}
1378
/* The underlying HCI link is up: notify and advance every channel on
 * @conn.  LE channels become ready once SMP allows it; BR/EDR fixed
 * channels are marked connected directly, and connection-oriented
 * channels in BT_CONNECT continue via l2cap_do_start().
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels do not follow the normal ready path */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channel: connected as soon
			 * as the link is, no signalling needed.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1427
1428 /* Notify sockets that we cannot guaranty reliability anymore */
1429 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1430 {
1431 struct l2cap_chan *chan;
1432
1433 BT_DBG("conn %p", conn);
1434
1435 mutex_lock(&conn->chan_lock);
1436
1437 list_for_each_entry(chan, &conn->chan_l, list) {
1438 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1439 l2cap_chan_set_err(chan, err);
1440 }
1441
1442 mutex_unlock(&conn->chan_lock);
1443 }
1444
/* Information request timed out: treat the feature-mask exchange as done
 * (with no extended features learned) and resume channel establishment.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1455
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object can either be explicitly unregistered, or it is
 * implicitly unregistered when the underlying l2cap_conn object is deleted.
 * This guarantees that conn->hcon, conn->hchan, etc. are valid as long as the
 * remove callback hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they
 * intend to call l2cap_unregister_user(). The l2cap_conn object might get
 * destroyed at any time if they don't.
 */
1468
/* Register @user on @conn and run its ->probe callback.
 *
 * Returns 0 on success, -EINVAL if @user is already linked to a
 * connection, -ENODEV if the connection was already deleted, or the
 * error returned by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list linkage means the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1506
/* Unregister @user from @conn and run its ->remove callback.  A user
 * that was never registered (or was already removed) is ignored.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* NULL linkage means not currently registered */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	/* Clear the linkage before ->remove so re-registration works */
	list_del(&user->list);
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1525
1526 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1527 {
1528 struct l2cap_user *user;
1529
1530 while (!list_empty(&conn->users)) {
1531 user = list_first_entry(&conn->users, struct l2cap_user, list);
1532 list_del(&user->list);
1533 user->list.next = NULL;
1534 user->list.prev = NULL;
1535 user->remove(conn, user);
1536 }
1537 }
1538
/* Tear down the L2CAP state attached to @hcon: close every channel with
 * error @err, cancel pending timers and drop the connection reference.
 * Safe to call when no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra ref keeps the channel alive across ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() is called without the channel lock held */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1584
/* SMP security procedure timed out: destroy the SMP context and tear
 * down the whole connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear avoids racing l2cap_conn_del() on the same flag */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1597
/* Get or create the l2cap_conn for @hcon.  Returns the existing object
 * if one is already attached, a freshly initialised one otherwise, or
 * NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Hold the hci_conn for the lifetime of the l2cap_conn */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the link MTU: LE MTU if the controller reports one,
	 * otherwise the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE uses the SMP security timer; BR/EDR the info-req timer */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1656
/* kref release callback: drop the hci_conn reference, then free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1664
/* Take a reference on @conn; paired with l2cap_conn_put(). */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1670
/* Drop a reference on @conn; frees it when the last one is released. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1676
1677 /* ---- Socket interface ---- */
1678
/* Find a channel with the given psm and source/destination bdaddr.
 * Returns the closest match; the returned channel is not locked.
 */
1682 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1683 bdaddr_t *src,
1684 bdaddr_t *dst)
1685 {
1686 struct l2cap_chan *c, *c1 = NULL;
1687
1688 read_lock(&chan_list_lock);
1689
1690 list_for_each_entry(c, &chan_list, global_l) {
1691 struct sock *sk = c->sk;
1692
1693 if (state && c->state != state)
1694 continue;
1695
1696 if (c->psm == psm) {
1697 int src_match, dst_match;
1698 int src_any, dst_any;
1699
1700 /* Exact match. */
1701 src_match = !bacmp(&bt_sk(sk)->src, src);
1702 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1703 if (src_match && dst_match) {
1704 read_unlock(&chan_list_lock);
1705 return c;
1706 }
1707
1708 /* Closest match */
1709 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1710 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1711 if ((src_match && dst_any) || (src_any && dst_match) ||
1712 (src_any && dst_any))
1713 c1 = c;
1714 }
1715 }
1716
1717 read_unlock(&chan_list_lock);
1718
1719 return c1;
1720 }
1721
/* Initiate an outgoing L2CAP connection on @chan to @dst, creating or
 * reusing the underlying ACL/LE link as needed.  @psm selects a
 * PSM-based channel, @cid a fixed-CID channel.  Returns 0 on success
 * (including when the channel is already connecting), negative errno
 * otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be used once per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around l2cap_chan_add(),
	 * presumably for lock ordering against conn->chan_lock - confirm
	 * against l2cap_chan_add().
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1857
/* Wait (interruptibly) until every outstanding ERTM I-frame has been
 * acked by the remote, or the channel loses its connection.  Called with
 * the socket locked; the lock is released while sleeping.  Returns 0 on
 * success or a negative errno (signal or socket error).
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the polling interval after each wakeup */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1889
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine.  The work item holds a channel reference that is dropped on
 * exit; if the channel lost its connection, only the ref is released.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1910
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine.  Mirrors l2cap_monitor_timeout(): the work item's
 * channel reference is always dropped before returning.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1930
/* Transmit the frames in @skbs on a streaming-mode channel: each frame
 * is stamped with the next TX sequence number, gets an FCS appended if
 * enabled, and is sent immediately.  Streaming mode keeps no copy -
 * frames are never retransmitted.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames carry no ack information */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1969
/* Send as many queued ERTM I-frames as the remote TX window allows,
 * starting at tx_send_head.  Each frame is cloned before transmission so
 * the original stays queued for possible retransmission.  Returns the
 * number of frames sent, 0 when nothing can be sent right now, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head past the frame just sent */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2039
/* Retransmit every sequence number on the retrans_list.  Frames that
 * have exceeded max_tx trigger a disconnect.  Cloned frames are copied
 * (not re-cloned) to obtain a writeable header before the control field
 * is rewritten with the current reqseq/F-bit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* Work on a local copy of the control block from here on */
		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten header */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2118
/* Queue the single sequence number named by control->reqseq for
 * retransmission and resend it immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2127
/* Retransmit all unacked frames, starting at control->reqseq and ending
 * at tx_send_head.  Used in response to REJ/SREJ or a poll; a poll also
 * requests that the F-bit be set on the next frame sent.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmission list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit ... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ... then queue everything up to tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2161
/* Acknowledge received I-frames.  When locally busy, send an RNR
 * immediately.  Otherwise try to piggy-back the ack on pending I-frames;
 * if frames still need acking, send an RR once 3/4 of the ack window is
 * consumed, or (re)arm the ack timer to batch the ack for later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything left unacked is picked up by the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2211
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes in skb's linear area and the rest in continuation
 * fragments of at most conn->mtu bytes each, chained on the frag_list.
 * Returns the number of bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2256
/* Build a connectionless (G-frame) PDU: L2CAP header plus the 2-byte
 * PSM, followed by @len bytes of payload from @msg.  Returns the skb or
 * an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* Linear part is limited by the link MTU; the rest fragments */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2290
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by @len bytes
 * of payload from @msg.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear part is limited by the link MTU; the rest fragments */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2323
/* Build a single ERTM/streaming I-frame carrying @len bytes from @msg.
 * A non-zero @sdulen marks the start of a segmented SDU and adds the SDU
 * length field.  The control field is zeroed here and filled in at
 * transmit time; room for the FCS is reserved but not written.  Returns
 * the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on extended vs enhanced control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2377
/* Split an SDU read from @msg into one or more I-frame PDUs appended to
 * @seg_queue, tagging each with the proper SAR value (unsegmented, or
 * start/continue/end).  Returns 0 on success or a negative errno; on
 * error the queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in a single frame */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first segment carries the SDU length,
			 * so later segments regain that room.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2447
/* Send user data from @msg over @chan, dispatching on the channel type
 * and mode (connectionless, basic, ERTM, streaming).
 *
 * Returns the number of bytes accepted (== @len) on success or a
 * negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		/* Hand the segmented PDUs to the TX side; ERTM goes
		 * through the state machine, streaming sends directly.
		 */
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2527
2528 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2529 {
2530 struct l2cap_ctrl control;
2531 u16 seq;
2532
2533 BT_DBG("chan %p, txseq %u", chan, txseq);
2534
2535 memset(&control, 0, sizeof(control));
2536 control.sframe = 1;
2537 control.super = L2CAP_SUPER_SREJ;
2538
2539 for (seq = chan->expected_tx_seq; seq != txseq;
2540 seq = __next_seq(chan, seq)) {
2541 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2542 control.reqseq = seq;
2543 l2cap_send_sframe(chan, &control);
2544 l2cap_seq_list_append(&chan->srej_list, seq);
2545 }
2546 }
2547
2548 chan->expected_tx_seq = __next_seq(chan, txseq);
2549 }
2550
2551 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2552 {
2553 struct l2cap_ctrl control;
2554
2555 BT_DBG("chan %p", chan);
2556
2557 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2558 return;
2559
2560 memset(&control, 0, sizeof(control));
2561 control.sframe = 1;
2562 control.super = L2CAP_SUPER_SREJ;
2563 control.reqseq = chan->srej_list.tail;
2564 l2cap_send_sframe(chan, &control);
2565 }
2566
2567 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2568 {
2569 struct l2cap_ctrl control;
2570 u16 initial_head;
2571 u16 seq;
2572
2573 BT_DBG("chan %p, txseq %u", chan, txseq);
2574
2575 memset(&control, 0, sizeof(control));
2576 control.sframe = 1;
2577 control.super = L2CAP_SUPER_SREJ;
2578
2579 /* Capture initial list head to allow only one pass through the list. */
2580 initial_head = chan->srej_list.head;
2581
2582 do {
2583 seq = l2cap_seq_list_pop(&chan->srej_list);
2584 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2585 break;
2586
2587 control.reqseq = seq;
2588 l2cap_send_sframe(chan, &control);
2589 l2cap_seq_list_append(&chan->srej_list, seq);
2590 } while (chan->srej_list.head != initial_head);
2591 }
2592
2593 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2594 {
2595 struct sk_buff *acked_skb;
2596 u16 ackseq;
2597
2598 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2599
2600 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2601 return;
2602
2603 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2604 chan->expected_ack_seq, chan->unacked_frames);
2605
2606 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2607 ackseq = __next_seq(chan, ackseq)) {
2608
2609 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2610 if (acked_skb) {
2611 skb_unlink(acked_skb, &chan->tx_q);
2612 kfree_skb(acked_skb);
2613 chan->unacked_frames--;
2614 }
2615 }
2616
2617 chan->expected_ack_seq = reqseq;
2618
2619 if (chan->unacked_frames == 0)
2620 __clear_retrans_timer(chan);
2621
2622 BT_DBG("unacked_frames %u", chan->unacked_frames);
2623 }
2624
2625 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2626 {
2627 BT_DBG("chan %p", chan);
2628
2629 chan->expected_tx_seq = chan->buffer_seq;
2630 l2cap_seq_list_clear(&chan->srej_list);
2631 skb_queue_purge(&chan->srej_q);
2632 chan->rx_state = L2CAP_RX_STATE_RECV;
2633 }
2634
/* ERTM TX state machine handler for the XMIT state: new data is queued
 * and sent immediately; polls and timeouts transition to WAIT_F.
 * Unknown events are ignored.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Ownership of the skbs moves to tx_q here. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR was sent while busy; poll the peer with an
			 * RR (P=1) and wait for the final bit in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2706
2707 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2708 struct l2cap_ctrl *control,
2709 struct sk_buff_head *skbs, u8 event)
2710 {
2711 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2712 event);
2713
2714 switch (event) {
2715 case L2CAP_EV_DATA_REQUEST:
2716 if (chan->tx_send_head == NULL)
2717 chan->tx_send_head = skb_peek(skbs);
2718 /* Queue data, but don't send. */
2719 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2720 break;
2721 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2722 BT_DBG("Enter LOCAL_BUSY");
2723 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2724
2725 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2726 /* The SREJ_SENT state must be aborted if we are to
2727 * enter the LOCAL_BUSY state.
2728 */
2729 l2cap_abort_rx_srej_sent(chan);
2730 }
2731
2732 l2cap_send_ack(chan);
2733
2734 break;
2735 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2736 BT_DBG("Exit LOCAL_BUSY");
2737 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2738
2739 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2740 struct l2cap_ctrl local_control;
2741 memset(&local_control, 0, sizeof(local_control));
2742 local_control.sframe = 1;
2743 local_control.super = L2CAP_SUPER_RR;
2744 local_control.poll = 1;
2745 local_control.reqseq = chan->buffer_seq;
2746 l2cap_send_sframe(chan, &local_control);
2747
2748 chan->retry_count = 1;
2749 __set_monitor_timer(chan);
2750 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2751 }
2752 break;
2753 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2754 l2cap_process_reqseq(chan, control->reqseq);
2755
2756 /* Fall through */
2757
2758 case L2CAP_EV_RECV_FBIT:
2759 if (control && control->final) {
2760 __clear_monitor_timer(chan);
2761 if (chan->unacked_frames > 0)
2762 __set_retrans_timer(chan);
2763 chan->retry_count = 0;
2764 chan->tx_state = L2CAP_TX_STATE_XMIT;
2765 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2766 }
2767 break;
2768 case L2CAP_EV_EXPLICIT_POLL:
2769 /* Ignore */
2770 break;
2771 case L2CAP_EV_MONITOR_TO:
2772 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2773 l2cap_send_rr_or_rnr(chan, 1);
2774 __set_monitor_timer(chan);
2775 chan->retry_count++;
2776 } else {
2777 l2cap_send_disconn_req(chan, ECONNABORTED);
2778 }
2779 break;
2780 default:
2781 break;
2782 }
2783 }
2784
2785 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2786 struct sk_buff_head *skbs, u8 event)
2787 {
2788 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2789 chan, control, skbs, event, chan->tx_state);
2790
2791 switch (chan->tx_state) {
2792 case L2CAP_TX_STATE_XMIT:
2793 l2cap_tx_state_xmit(chan, control, skbs, event);
2794 break;
2795 case L2CAP_TX_STATE_WAIT_F:
2796 l2cap_tx_state_wait_f(chan, control, skbs, event);
2797 break;
2798 default:
2799 /* Ignore event */
2800 break;
2801 }
2802 }
2803
2804 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2805 struct l2cap_ctrl *control)
2806 {
2807 BT_DBG("chan %p, control %p", chan, control);
2808 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2809 }
2810
2811 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2812 struct l2cap_ctrl *control)
2813 {
2814 BT_DBG("chan %p, control %p", chan, control);
2815 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2816 }
2817
2818 /* Copy frame to all raw sockets on that connection */
2819 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2820 {
2821 struct sk_buff *nskb;
2822 struct l2cap_chan *chan;
2823
2824 BT_DBG("conn %p", conn);
2825
2826 mutex_lock(&conn->chan_lock);
2827
2828 list_for_each_entry(chan, &conn->chan_l, list) {
2829 struct sock *sk = chan->sk;
2830 if (chan->chan_type != L2CAP_CHAN_RAW)
2831 continue;
2832
2833 /* Don't send frame to the socket it came from */
2834 if (skb->sk == sk)
2835 continue;
2836 nskb = skb_clone(skb, GFP_KERNEL);
2837 if (!nskb)
2838 continue;
2839
2840 if (chan->ops->recv(chan, nskb))
2841 kfree_skb(nskb);
2842 }
2843
2844 mutex_unlock(&conn->chan_lock);
2845 }
2846
2847 /* ---- L2CAP signalling commands ---- */
2848 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2849 u8 ident, u16 dlen, void *data)
2850 {
2851 struct sk_buff *skb, **frag;
2852 struct l2cap_cmd_hdr *cmd;
2853 struct l2cap_hdr *lh;
2854 int len, count;
2855
2856 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2857 conn, code, ident, dlen);
2858
2859 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2860 return NULL;
2861
2862 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2863 count = min_t(unsigned int, conn->mtu, len);
2864
2865 skb = bt_skb_alloc(count, GFP_KERNEL);
2866 if (!skb)
2867 return NULL;
2868
2869 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2870 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2871
2872 if (conn->hcon->type == LE_LINK)
2873 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2874 else
2875 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2876
2877 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2878 cmd->code = code;
2879 cmd->ident = ident;
2880 cmd->len = cpu_to_le16(dlen);
2881
2882 if (dlen) {
2883 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2884 memcpy(skb_put(skb, count), data, count);
2885 data += count;
2886 }
2887
2888 len -= skb->len;
2889
2890 /* Continuation fragments (no L2CAP header) */
2891 frag = &skb_shinfo(skb)->frag_list;
2892 while (len) {
2893 count = min_t(unsigned int, conn->mtu, len);
2894
2895 *frag = bt_skb_alloc(count, GFP_KERNEL);
2896 if (!*frag)
2897 goto fail;
2898
2899 memcpy(skb_put(*frag, count), data, count);
2900
2901 len -= count;
2902 data += count;
2903
2904 frag = &(*frag)->next;
2905 }
2906
2907 return skb;
2908
2909 fail:
2910 kfree_skb(skb);
2911 return NULL;
2912 }
2913
/* Parse one configuration option at *ptr, returning its type, payload
 * length and value, and advancing *ptr past the option. Values of 1, 2
 * or 4 bytes are returned inline in *val; larger payloads are returned
 * as a pointer cast into *val.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes straight from the wire and is not
 * checked against the remaining buffer here — callers bound the loop
 * with the overall length but a crafted opt->len can still push *ptr
 * past the end of the options buffer; verify callers' buffers.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	/* Advance past this option: header plus declared payload. */
	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Larger options are returned by reference. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2947
2948 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2949 {
2950 struct l2cap_conf_opt *opt = *ptr;
2951
2952 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2953
2954 opt->type = type;
2955 opt->len = len;
2956
2957 switch (len) {
2958 case 1:
2959 *((u8 *) opt->val) = val;
2960 break;
2961
2962 case 2:
2963 put_unaligned_le16(val, opt->val);
2964 break;
2965
2966 case 4:
2967 put_unaligned_le32(val, opt->val);
2968 break;
2969
2970 default:
2971 memcpy(opt->val, (void *) val, len);
2972 break;
2973 }
2974
2975 *ptr += L2CAP_CONF_OPT_SIZE + len;
2976 }
2977
2978 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2979 {
2980 struct l2cap_conf_efs efs;
2981
2982 switch (chan->mode) {
2983 case L2CAP_MODE_ERTM:
2984 efs.id = chan->local_id;
2985 efs.stype = chan->local_stype;
2986 efs.msdu = cpu_to_le16(chan->local_msdu);
2987 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2988 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2989 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2990 break;
2991
2992 case L2CAP_MODE_STREAMING:
2993 efs.id = 1;
2994 efs.stype = L2CAP_SERV_BESTEFFORT;
2995 efs.msdu = cpu_to_le16(chan->local_msdu);
2996 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2997 efs.acc_lat = 0;
2998 efs.flush_to = 0;
2999 break;
3000
3001 default:
3002 return;
3003 }
3004
3005 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3006 (unsigned long) &efs);
3007 }
3008
3009 static void l2cap_ack_timeout(struct work_struct *work)
3010 {
3011 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3012 ack_timer.work);
3013 u16 frames_to_ack;
3014
3015 BT_DBG("chan %p", chan);
3016
3017 l2cap_chan_lock(chan);
3018
3019 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3020 chan->last_acked_seq);
3021
3022 if (frames_to_ack)
3023 l2cap_send_rr_or_rnr(chan, 0);
3024
3025 l2cap_chan_unlock(chan);
3026 l2cap_chan_put(chan);
3027 }
3028
/* Reset per-channel sequence, SDU-reassembly and AMP-move state, and
 * (for ERTM mode only) initialize the timers, SREJ queue and the
 * SREJ/retransmission sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence list
 * allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM-only state below. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* If the second list fails, release the first before bailing. */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3073
3074 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3075 {
3076 switch (mode) {
3077 case L2CAP_MODE_STREAMING:
3078 case L2CAP_MODE_ERTM:
3079 if (l2cap_mode_supported(mode, remote_feat_mask))
3080 return mode;
3081 /* fall through */
3082 default:
3083 return L2CAP_MODE_BASIC;
3084 }
3085 }
3086
3087 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3088 {
3089 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3090 }
3091
3092 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3093 {
3094 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3095 }
3096
/* Fill in the RFC option's retransmission and monitor timeouts. AMP
 * channels derive them from the controller's best-effort flush timeout;
 * BR/EDR channels use the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields. */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3134
3135 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3136 {
3137 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3138 __l2cap_ews_supported(chan)) {
3139 /* use extended control field */
3140 set_bit(FLAG_EXT_CTRL, &chan->flags);
3141 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3142 } else {
3143 chan->tx_win = min_t(u16, chan->tx_win,
3144 L2CAP_DEFAULT_TX_WINDOW);
3145 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3146 }
3147 chan->ack_win = chan->tx_win;
3148 }
3149
/* Build a Configure Request for @chan into @data, selecting the channel
 * mode on the first request and emitting the MTU, RFC, EFS, EWS and FCS
 * options appropriate to that mode.
 *
 * Returns the number of bytes written (header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC option when the
		 * remote supports the other modes.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a maximal frame (extended header,
		 * SDU length and FCS included) fits the HCI MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* The full window goes in the EWS option when extended
		 * control is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3267
/* Parse the remote's Configure Request stored on @chan and build our
 * Configure Response into @data.
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the requested configuration is unacceptable.
 *
 * Fix: @efs was previously left uninitialized. When the remote included
 * an EFS option whose length did not match sizeof(efs), remote_efs was
 * still set but efs was never filled in, so uninitialized stack bytes
 * were read below and could be echoed back to the remote via
 * l2cap_add_conf_opt(). Zero-initializing efs removes the undefined
 * behavior and the potential stack info leak.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs = { 0 };
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hint options are simply ignored. */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits our
			 * HCI MTU with maximal L2CAP overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3481
3482 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3483 void *data, u16 *result)
3484 {
3485 struct l2cap_conf_req *req = data;
3486 void *ptr = req->data;
3487 int type, olen;
3488 unsigned long val;
3489 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3490 struct l2cap_conf_efs efs;
3491
3492 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3493
3494 while (len >= L2CAP_CONF_OPT_SIZE) {
3495 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3496
3497 switch (type) {
3498 case L2CAP_CONF_MTU:
3499 if (val < L2CAP_DEFAULT_MIN_MTU) {
3500 *result = L2CAP_CONF_UNACCEPT;
3501 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3502 } else
3503 chan->imtu = val;
3504 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3505 break;
3506
3507 case L2CAP_CONF_FLUSH_TO:
3508 chan->flush_to = val;
3509 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3510 2, chan->flush_to);
3511 break;
3512
3513 case L2CAP_CONF_RFC:
3514 if (olen == sizeof(rfc))
3515 memcpy(&rfc, (void *)val, olen);
3516
3517 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3518 rfc.mode != chan->mode)
3519 return -ECONNREFUSED;
3520
3521 chan->fcs = 0;
3522
3523 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3524 sizeof(rfc), (unsigned long) &rfc);
3525 break;
3526
3527 case L2CAP_CONF_EWS:
3528 chan->ack_win = min_t(u16, val, chan->ack_win);
3529 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3530 chan->tx_win);
3531 break;
3532
3533 case L2CAP_CONF_EFS:
3534 if (olen == sizeof(efs))
3535 memcpy(&efs, (void *)val, olen);
3536
3537 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3538 efs.stype != L2CAP_SERV_NOTRAFIC &&
3539 efs.stype != chan->local_stype)
3540 return -ECONNREFUSED;
3541
3542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3543 (unsigned long) &efs);
3544 break;
3545
3546 case L2CAP_CONF_FCS:
3547 if (*result == L2CAP_CONF_PENDING)
3548 if (val == L2CAP_FCS_NONE)
3549 set_bit(CONF_RECV_NO_FCS,
3550 &chan->conf_state);
3551 break;
3552 }
3553 }
3554
3555 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3556 return -ECONNREFUSED;
3557
3558 chan->mode = rfc.mode;
3559
3560 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3561 switch (rfc.mode) {
3562 case L2CAP_MODE_ERTM:
3563 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3564 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3565 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3566 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3567 chan->ack_win = min_t(u16, chan->ack_win,
3568 rfc.txwin_size);
3569
3570 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3571 chan->local_msdu = le16_to_cpu(efs.msdu);
3572 chan->local_sdu_itime =
3573 le32_to_cpu(efs.sdu_itime);
3574 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3575 chan->local_flush_to =
3576 le32_to_cpu(efs.flush_to);
3577 }
3578 break;
3579
3580 case L2CAP_MODE_STREAMING:
3581 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3582 }
3583 }
3584
3585 req->dcid = cpu_to_le16(chan->dcid);
3586 req->flags = __constant_cpu_to_le16(0);
3587
3588 return ptr - data;
3589 }
3590
3591 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3592 u16 result, u16 flags)
3593 {
3594 struct l2cap_conf_rsp *rsp = data;
3595 void *ptr = rsp->data;
3596
3597 BT_DBG("chan %p", chan);
3598
3599 rsp->scid = cpu_to_le16(chan->dcid);
3600 rsp->result = cpu_to_le16(result);
3601 rsp->flags = cpu_to_le16(flags);
3602
3603 return ptr - data;
3604 }
3605
3606 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3607 {
3608 struct l2cap_conn_rsp rsp;
3609 struct l2cap_conn *conn = chan->conn;
3610 u8 buf[128];
3611 u8 rsp_code;
3612
3613 rsp.scid = cpu_to_le16(chan->dcid);
3614 rsp.dcid = cpu_to_le16(chan->scid);
3615 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3616 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3617
3618 if (chan->hs_hcon)
3619 rsp_code = L2CAP_CREATE_CHAN_RSP;
3620 else
3621 rsp_code = L2CAP_CONN_RSP;
3622
3623 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3624
3625 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3626
3627 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3628 return;
3629
3630 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3631 l2cap_build_conf_req(chan, buf), buf);
3632 chan->num_conf_req++;
3633 }
3634
3635 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3636 {
3637 int type, olen;
3638 unsigned long val;
3639 /* Use sane default values in case a misbehaving remote device
3640 * did not send an RFC or extended window size option.
3641 */
3642 u16 txwin_ext = chan->ack_win;
3643 struct l2cap_conf_rfc rfc = {
3644 .mode = chan->mode,
3645 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3646 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3647 .max_pdu_size = cpu_to_le16(chan->imtu),
3648 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3649 };
3650
3651 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3652
3653 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3654 return;
3655
3656 while (len >= L2CAP_CONF_OPT_SIZE) {
3657 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3658
3659 switch (type) {
3660 case L2CAP_CONF_RFC:
3661 if (olen == sizeof(rfc))
3662 memcpy(&rfc, (void *)val, olen);
3663 break;
3664 case L2CAP_CONF_EWS:
3665 txwin_ext = val;
3666 break;
3667 }
3668 }
3669
3670 switch (rfc.mode) {
3671 case L2CAP_MODE_ERTM:
3672 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3673 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3674 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3675 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3676 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3677 else
3678 chan->ack_win = min_t(u16, chan->ack_win,
3679 rfc.txwin_size);
3680 break;
3681 case L2CAP_MODE_STREAMING:
3682 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3683 }
3684 }
3685
3686 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3687 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3688 u8 *data)
3689 {
3690 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3691
3692 if (cmd_len < sizeof(*rej))
3693 return -EPROTO;
3694
3695 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3696 return 0;
3697
3698 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3699 cmd->ident == conn->info_ident) {
3700 cancel_delayed_work(&conn->info_timer);
3701
3702 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3703 conn->info_ident = 0;
3704
3705 l2cap_conn_start(conn);
3706 }
3707
3708 return 0;
3709 }
3710
/* Handle an incoming L2CAP Connect Request (or an AMP Create Channel
 * Request when amp_id != 0): look up a listening channel for the PSM,
 * create a child channel for the new connection and send a response
 * using @rsp_code.  Returns the new channel, or NULL if the request
 * was refused.  Lock order: conn->chan_lock, then the parent socket.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid.
	 * The peer's scid becomes our dcid, hence the lookup key.
	 */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our freshly allocated scid is the peer's dcid in the response */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace will authorize the connection */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security elevation still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange not finished yet */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Pending with "no info" means we still need the peer's feature
	 * mask; start the information exchange now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Successful connect: immediately start configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3846
/* Handle an incoming Connect Request on a BR/EDR link: notify the
 * management interface of the connected device (once per ACL), then
 * run the common connect logic with a plain Connect Response code.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* test_and_set_bit ensures mgmt_device_connected fires only once */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3867
/* Handle a Connect Response (or AMP Create Channel Response) to our
 * earlier request.  On success move to BT_CONFIG and send the first
 * Configure Request; on pend, mark the channel; any other result tears
 * the channel down.  Returns -EFAULT if no matching channel is found.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid is valid in a pending response; fall back to
	 * matching on the command identifier in that case.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Connection refused or failed */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3940
3941 static inline void set_default_fcs(struct l2cap_chan *chan)
3942 {
3943 /* FCS is enabled only in ERTM or streaming mode, if one or both
3944 * sides request it.
3945 */
3946 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3947 chan->fcs = L2CAP_FCS_NONE;
3948 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3949 chan->fcs = L2CAP_FCS_CRC16;
3950 }
3951
3952 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3953 u8 ident, u16 flags)
3954 {
3955 struct l2cap_conn *conn = chan->conn;
3956
3957 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3958 flags);
3959
3960 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3961 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3962
3963 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3964 l2cap_build_conf_rsp(chan, data,
3965 L2CAP_CONF_SUCCESS, flags), data);
3966 }
3967
/* Handle an incoming Configure Request.  Options may be split across
 * several requests (continuation flag); they are accumulated in
 * chan->conf_req until the final fragment arrives, then parsed and
 * answered.  Once both directions are configured the channel is made
 * ready (with ERTM initialization where applicable).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparseable/unacceptable options: drop the channel */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4080
/* Handle a Configure Response to our Configure Request.  Success and
 * pending results feed the negotiated options back into the channel;
 * an unacceptable result triggers a (bounded) renegotiation; anything
 * else disconnects.  Once both directions are done the channel is made
 * ready.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: delay the response until
				 * the logical link is up.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many renegotiation rounds, give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4192
/* Handle an incoming Disconnect Request: acknowledge it with a
 * Disconnect Response, shut the socket down and tear the channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a reference so the channel survives l2cap_chan_del() and
	 * can still be passed to ops->close() below.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4243
/* Handle the Disconnect Response to our earlier Disconnect Request:
 * the channel can now be freed.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel outlives l2cap_chan_del() for
	 * the ops->close() call below.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4282
/* Handle an incoming Information Request and answer with the requested
 * data: the feature mask, the fixed channel map, or a "not supported"
 * response for any other type.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* info_rsp header followed by the 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* info_rsp header followed by the fixed channel map;
		 * the A2MP bit is advertised only while high-speed
		 * support is enabled.
		 */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4337
/* Handle the response to our Information Request.  A feature-mask
 * response may trigger a follow-up fixed-channel query; once all info
 * is gathered (or the request failed) pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Give up on the exchange but still start channels */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels; query the map */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4400
/* Handle an AMP Create Channel Request.  A BR/EDR controller id falls
 * back to the plain connect path; otherwise the AMP controller id is
 * validated and the newly created channel is bound to its high-speed
 * logical link.  A bad controller id is answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS disabled for AMP channels (same as l2cap_do_create) */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4474
4475 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4476 {
4477 struct l2cap_move_chan_req req;
4478 u8 ident;
4479
4480 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4481
4482 ident = l2cap_get_ident(chan->conn);
4483 chan->ident = ident;
4484
4485 req.icid = cpu_to_le16(chan->scid);
4486 req.dest_amp_id = dest_amp_id;
4487
4488 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4489 &req);
4490
4491 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4492 }
4493
4494 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4495 {
4496 struct l2cap_move_chan_rsp rsp;
4497
4498 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4499
4500 rsp.icid = cpu_to_le16(chan->dcid);
4501 rsp.result = cpu_to_le16(result);
4502
4503 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4504 sizeof(rsp), &rsp);
4505 }
4506
4507 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4508 {
4509 struct l2cap_move_chan_cfm cfm;
4510
4511 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4512
4513 chan->ident = l2cap_get_ident(chan->conn);
4514
4515 cfm.icid = cpu_to_le16(chan->scid);
4516 cfm.result = cpu_to_le16(result);
4517
4518 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4519 sizeof(cfm), &cfm);
4520
4521 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4522 }
4523
4524 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4525 {
4526 struct l2cap_move_chan_cfm cfm;
4527
4528 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4529
4530 cfm.icid = cpu_to_le16(icid);
4531 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4532
4533 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4534 sizeof(cfm), &cfm);
4535 }
4536
4537 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4538 u16 icid)
4539 {
4540 struct l2cap_move_chan_cfm_rsp rsp;
4541
4542 BT_DBG("icid 0x%4.4x", icid);
4543
4544 rsp.icid = cpu_to_le16(icid);
4545 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4546 }
4547
4548 static void __release_logical_link(struct l2cap_chan *chan)
4549 {
4550 chan->hs_hchan = NULL;
4551 chan->hs_hcon = NULL;
4552
4553 /* Placeholder - release the logical link */
4554 }
4555
/* Logical link setup for @chan failed: abort channel creation, or
 * unwind an in-progress AMP move according to our role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the channel */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4586
/* A logical link came up for a channel still being created: send the
 * pending EFS Configure Response and, if the remote side already
 * finished configuring, complete channel setup.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		/* Both directions configured: finish channel setup */
		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4609
/* A logical link came up while an AMP channel move is in progress:
 * advance the move state machine according to our role and current
 * state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Hold off until local busy condition clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4643
/* Logical link (AMP) setup completion callback.  On failure the link
 * is torn down; on success we finish either channel creation (channel
 * not yet connected) or an in-progress channel move.
 *
 * Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4664
4665 void l2cap_move_start(struct l2cap_chan *chan)
4666 {
4667 BT_DBG("chan %p", chan);
4668
4669 if (chan->local_amp_id == HCI_BREDR_ID) {
4670 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4671 return;
4672 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4673 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4674 /* Placeholder - start physical link setup */
4675 } else {
4676 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4677 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4678 chan->move_id = 0;
4679 l2cap_move_setup(chan);
4680 l2cap_send_move_chan_req(chan, 0);
4681 }
4682 }
4683
/* Finish AMP channel creation once the physical link result is known.
 * For an outgoing channel either send the Create Channel Request or
 * fall back to a plain BR/EDR connect; for an incoming channel send
 * the Create Channel Response and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No L2CAP FCS on AMP channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Connection accepted: start configuration */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4735
4736 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4737 u8 remote_amp_id)
4738 {
4739 l2cap_move_setup(chan);
4740 chan->move_id = local_amp_id;
4741 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4742
4743 l2cap_send_move_chan_req(chan, remote_amp_id);
4744 }
4745
/* Respond to a channel move (responder role) once the physical link
 * attempt has completed.
 *
 * NOTE(review): hchan is a placeholder that is never assigned, so only
 * the "logical link not available" branch can currently execute; the
 * other branches become live once the hci_chan lookup is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4770
4771 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4772 {
4773 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4774 u8 rsp_result;
4775 if (result == -EINVAL)
4776 rsp_result = L2CAP_MR_BAD_ID;
4777 else
4778 rsp_result = L2CAP_MR_NOT_ALLOWED;
4779
4780 l2cap_send_move_chan_rsp(chan, rsp_result);
4781 }
4782
4783 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4784 chan->move_state = L2CAP_MOVE_STABLE;
4785
4786 /* Restart data transmission */
4787 l2cap_ertm_send(chan);
4788 }
4789
/* Handle completion of an AMP physical link attempt.
 *
 * Dispatches on channel state: a channel that is not yet connected
 * continues channel creation; a connected channel proceeds with (or
 * cancels) a channel move according to its move role and the result.
 *
 * Invoke with locked chan.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED path unlocks the channel
 * before returning, while all other paths leave it locked — confirm
 * this matches the caller's unlocking convention.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is going away; nothing to create or move */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4823
/* Handle an incoming Move Channel Request.
 *
 * Validates the request against local channel state and either
 * accepts, defers (pending), or rejects the move; a Move Channel
 * Response is always sent when the channel is known.
 *
 * Returns 0 when a response was sent, -EPROTO on a malformed request,
 * or -EINVAL when high-speed support is disabled.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Channel moves require high-speed (AMP) support */
	if (!enable_hs)
		return -EINVAL;

	/* Looked-up channel is unlocked before every return below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reject directly, nothing to lock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamically allocated ERTM/streaming channels that are
	 * not pinned to BR/EDR may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Moving to the controller the channel is already on is an error */
	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-zero destination must name a powered-up AMP controller */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4921
/* Continue an in-progress channel move after a successful or pending
 * Move Channel Response from the peer.
 *
 * Advances chan->move_state and sends a Move Channel Confirm once both
 * the peer's answer and the logical link state allow it.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Unknown channel: confirm by icid alone */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Peer needs more time: rearm with the extended timeout */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5011
5012 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5013 u16 result)
5014 {
5015 struct l2cap_chan *chan;
5016
5017 chan = l2cap_get_chan_by_ident(conn, ident);
5018 if (!chan) {
5019 /* Could not locate channel, icid is best guess */
5020 l2cap_send_move_chan_cfm_icid(conn, icid);
5021 return;
5022 }
5023
5024 __clear_chan_timer(chan);
5025
5026 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5027 if (result == L2CAP_MR_COLLISION) {
5028 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5029 } else {
5030 /* Cleanup - cancel move */
5031 chan->move_id = chan->local_amp_id;
5032 l2cap_move_done(chan);
5033 }
5034 }
5035
5036 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5037
5038 l2cap_chan_unlock(chan);
5039 }
5040
5041 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5042 struct l2cap_cmd_hdr *cmd,
5043 u16 cmd_len, void *data)
5044 {
5045 struct l2cap_move_chan_rsp *rsp = data;
5046 u16 icid, result;
5047
5048 if (cmd_len != sizeof(*rsp))
5049 return -EPROTO;
5050
5051 icid = le16_to_cpu(rsp->icid);
5052 result = le16_to_cpu(rsp->result);
5053
5054 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5055
5056 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5057 l2cap_move_continue(conn, icid, result);
5058 else
5059 l2cap_move_fail(conn, cmd->ident, icid, result);
5060
5061 return 0;
5062 }
5063
5064 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5065 struct l2cap_cmd_hdr *cmd,
5066 u16 cmd_len, void *data)
5067 {
5068 struct l2cap_move_chan_cfm *cfm = data;
5069 struct l2cap_chan *chan;
5070 u16 icid, result;
5071
5072 if (cmd_len != sizeof(*cfm))
5073 return -EPROTO;
5074
5075 icid = le16_to_cpu(cfm->icid);
5076 result = le16_to_cpu(cfm->result);
5077
5078 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5079
5080 chan = l2cap_get_chan_by_dcid(conn, icid);
5081 if (!chan) {
5082 /* Spec requires a response even if the icid was not found */
5083 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5084 return 0;
5085 }
5086
5087 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5088 if (result == L2CAP_MC_CONFIRMED) {
5089 chan->local_amp_id = chan->move_id;
5090 if (!chan->local_amp_id)
5091 __release_logical_link(chan);
5092 } else {
5093 chan->move_id = chan->local_amp_id;
5094 }
5095
5096 l2cap_move_done(chan);
5097 }
5098
5099 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5100
5101 l2cap_chan_unlock(chan);
5102
5103 return 0;
5104 }
5105
5106 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5107 struct l2cap_cmd_hdr *cmd,
5108 u16 cmd_len, void *data)
5109 {
5110 struct l2cap_move_chan_cfm_rsp *rsp = data;
5111 struct l2cap_chan *chan;
5112 u16 icid;
5113
5114 if (cmd_len != sizeof(*rsp))
5115 return -EPROTO;
5116
5117 icid = le16_to_cpu(rsp->icid);
5118
5119 BT_DBG("icid 0x%4.4x", icid);
5120
5121 chan = l2cap_get_chan_by_scid(conn, icid);
5122 if (!chan)
5123 return 0;
5124
5125 __clear_chan_timer(chan);
5126
5127 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5128 chan->local_amp_id = chan->move_id;
5129
5130 if (!chan->local_amp_id && chan->hs_hchan)
5131 __release_logical_link(chan);
5132
5133 l2cap_move_done(chan);
5134 }
5135
5136 l2cap_chan_unlock(chan);
5137
5138 return 0;
5139 }
5140
5141 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5142 u16 to_multiplier)
5143 {
5144 u16 max_latency;
5145
5146 if (min > max || min < 6 || max > 3200)
5147 return -EINVAL;
5148
5149 if (to_multiplier < 10 || to_multiplier > 3200)
5150 return -EINVAL;
5151
5152 if (max >= to_multiplier * 8)
5153 return -EINVAL;
5154
5155 max_latency = (to_multiplier * 8 / max) - 1;
5156 if (latency > 499 || latency > max_latency)
5157 return -EINVAL;
5158
5159 return 0;
5160 }
5161
/* Handle an LE Connection Parameter Update Request.
 *
 * Validates the requested parameters, always sends an accept/reject
 * response, and on accept asks the controller to apply the new
 * parameters.
 *
 * Returns 0 when a response was sent, -EINVAL when the local side is
 * not master, or -EPROTO for a malformed request.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may act on a parameter update request */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5204
5205 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5206 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5207 u8 *data)
5208 {
5209 int err = 0;
5210
5211 switch (cmd->code) {
5212 case L2CAP_COMMAND_REJ:
5213 l2cap_command_rej(conn, cmd, cmd_len, data);
5214 break;
5215
5216 case L2CAP_CONN_REQ:
5217 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5218 break;
5219
5220 case L2CAP_CONN_RSP:
5221 case L2CAP_CREATE_CHAN_RSP:
5222 err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5223 break;
5224
5225 case L2CAP_CONF_REQ:
5226 err = l2cap_config_req(conn, cmd, cmd_len, data);
5227 break;
5228
5229 case L2CAP_CONF_RSP:
5230 err = l2cap_config_rsp(conn, cmd, cmd_len, data);
5231 break;
5232
5233 case L2CAP_DISCONN_REQ:
5234 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5235 break;
5236
5237 case L2CAP_DISCONN_RSP:
5238 err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5239 break;
5240
5241 case L2CAP_ECHO_REQ:
5242 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5243 break;
5244
5245 case L2CAP_ECHO_RSP:
5246 break;
5247
5248 case L2CAP_INFO_REQ:
5249 err = l2cap_information_req(conn, cmd, cmd_len, data);
5250 break;
5251
5252 case L2CAP_INFO_RSP:
5253 err = l2cap_information_rsp(conn, cmd, cmd_len, data);
5254 break;
5255
5256 case L2CAP_CREATE_CHAN_REQ:
5257 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5258 break;
5259
5260 case L2CAP_MOVE_CHAN_REQ:
5261 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5262 break;
5263
5264 case L2CAP_MOVE_CHAN_RSP:
5265 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5266 break;
5267
5268 case L2CAP_MOVE_CHAN_CFM:
5269 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5270 break;
5271
5272 case L2CAP_MOVE_CHAN_CFM_RSP:
5273 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5274 break;
5275
5276 default:
5277 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5278 err = -EINVAL;
5279 break;
5280 }
5281
5282 return err;
5283 }
5284
5285 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5286 struct l2cap_cmd_hdr *cmd, u8 *data)
5287 {
5288 switch (cmd->code) {
5289 case L2CAP_COMMAND_REJ:
5290 return 0;
5291
5292 case L2CAP_CONN_PARAM_UPDATE_REQ:
5293 return l2cap_conn_param_update_req(conn, cmd, data);
5294
5295 case L2CAP_CONN_PARAM_UPDATE_RSP:
5296 return 0;
5297
5298 default:
5299 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5300 return -EINVAL;
5301 }
5302 }
5303
5304 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5305 struct sk_buff *skb)
5306 {
5307 u8 *data = skb->data;
5308 int len = skb->len;
5309 struct l2cap_cmd_hdr cmd;
5310 int err;
5311
5312 l2cap_raw_recv(conn, skb);
5313
5314 while (len >= L2CAP_CMD_HDR_SIZE) {
5315 u16 cmd_len;
5316 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5317 data += L2CAP_CMD_HDR_SIZE;
5318 len -= L2CAP_CMD_HDR_SIZE;
5319
5320 cmd_len = le16_to_cpu(cmd.len);
5321
5322 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5323 cmd.ident);
5324
5325 if (cmd_len > len || !cmd.ident) {
5326 BT_DBG("corrupted command");
5327 break;
5328 }
5329
5330 err = l2cap_le_sig_cmd(conn, &cmd, data);
5331 if (err) {
5332 struct l2cap_cmd_rej_unk rej;
5333
5334 BT_ERR("Wrong link type (%d)", err);
5335
5336 /* FIXME: Map err to a valid reason */
5337 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5338 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5339 sizeof(rej), &rej);
5340 }
5341
5342 data += cmd_len;
5343 len -= cmd_len;
5344 }
5345
5346 kfree_skb(skb);
5347 }
5348
5349 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5350 struct sk_buff *skb)
5351 {
5352 u8 *data = skb->data;
5353 int len = skb->len;
5354 struct l2cap_cmd_hdr cmd;
5355 int err;
5356
5357 l2cap_raw_recv(conn, skb);
5358
5359 while (len >= L2CAP_CMD_HDR_SIZE) {
5360 u16 cmd_len;
5361 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5362 data += L2CAP_CMD_HDR_SIZE;
5363 len -= L2CAP_CMD_HDR_SIZE;
5364
5365 cmd_len = le16_to_cpu(cmd.len);
5366
5367 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5368 cmd.ident);
5369
5370 if (cmd_len > len || !cmd.ident) {
5371 BT_DBG("corrupted command");
5372 break;
5373 }
5374
5375 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5376 if (err) {
5377 struct l2cap_cmd_rej_unk rej;
5378
5379 BT_ERR("Wrong link type (%d)", err);
5380
5381 /* FIXME: Map err to a valid reason */
5382 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5383 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5384 sizeof(rej), &rej);
5385 }
5386
5387 data += cmd_len;
5388 len -= cmd_len;
5389 }
5390
5391 kfree_skb(skb);
5392 }
5393
5394 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5395 {
5396 u16 our_fcs, rcv_fcs;
5397 int hdr_size;
5398
5399 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5400 hdr_size = L2CAP_EXT_HDR_SIZE;
5401 else
5402 hdr_size = L2CAP_ENH_HDR_SIZE;
5403
5404 if (chan->fcs == L2CAP_FCS_CRC16) {
5405 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5406 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5407 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5408
5409 if (our_fcs != rcv_fcs)
5410 return -EBADMSG;
5411 }
5412 return 0;
5413 }
5414
/* Answer a poll with the F-bit set, using an RNR when locally busy,
 * pending I-frames when possible, or a final RR otherwise.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* Locally busy: report it with an RNR carrying the F-bit */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Clear any remote-busy condition; if frames are still
	 * unacknowledged, restart the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5448
/* Append new_frag to skb's fragment list through the caller-owned
 * tail pointer *last_frag, and update the head skb's accounting.
 *
 * skb->len reflects data in skb as well as all fragments
 * skb->data_len reflects only data in fragments
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* First fragment: start the frag list on the head skb */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* Link at the tail and advance the caller's tail pointer */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5467
/* Reassemble an SDU from I-frames according to the frame's SAR
 * (Segmentation And Reassembly) field.
 *
 * On success the skb is consumed: delivered via chan->ops->recv() or
 * stored in chan->sdu for later segments. On any error both the frame
 * and any partially reassembled SDU are freed.
 *
 * Returns 0 on success, -EINVAL for SAR sequence violations,
 * -EMSGSIZE when the announced SDU exceeds the MTU, or an error from
 * the recv callback.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment covering the whole SDU is invalid
		 * (err stays -EINVAL and the error path runs below).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb ownership passed to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or exceed the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final length must match the announced SDU length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame (if still owned here) and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5549
/* Resegment queued frames after a channel move; currently a no-op
 * that always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5555
5556 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5557 {
5558 u8 event;
5559
5560 if (chan->mode != L2CAP_MODE_ERTM)
5561 return;
5562
5563 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5564 l2cap_tx(chan, NULL, NULL, event);
5565 }
5566
/* Deliver frames buffered in the SREJ queue once their gaps are
 * filled.
 *
 * Pass sequential frames to l2cap_reassemble_sdu()
 * until a gap is encountered.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Stop at the first missing sequence number */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* Queue drained: leave the SREJ state and acknowledge */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5600
/* Handle a received SREJ (selective reject) S-frame by retransmitting
 * the requested I-frame, with poll/final-bit bookkeeping.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for a sequence number never sent is a protocol
	 * violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* Enforce the retry limit (max_tx of 0 means unlimited) */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands a final bit in our reply */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this final bit
			 * answers the SREJ already acted upon.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5658
/* Handle a received REJ S-frame by retransmitting all unacknowledged
 * I-frames starting at reqseq.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for a sequence number never sent is a protocol
	 * violation.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Enforce the retry limit (max_tx of 0 means unlimited) */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if this final bit does not answer a
		 * REJ already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5695
/* Classify a received I-frame's txseq relative to the expected
 * sequence number, the last acknowledged sequence number, and the tx
 * window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that the rx state
 * machines use to decide whether to deliver, queue, SREJ, ignore, or
 * disconnect.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5781
5782 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5783 struct l2cap_ctrl *control,
5784 struct sk_buff *skb, u8 event)
5785 {
5786 int err = 0;
5787 bool skb_in_use = 0;
5788
5789 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5790 event);
5791
5792 switch (event) {
5793 case L2CAP_EV_RECV_IFRAME:
5794 switch (l2cap_classify_txseq(chan, control->txseq)) {
5795 case L2CAP_TXSEQ_EXPECTED:
5796 l2cap_pass_to_tx(chan, control);
5797
5798 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5799 BT_DBG("Busy, discarding expected seq %d",
5800 control->txseq);
5801 break;
5802 }
5803
5804 chan->expected_tx_seq = __next_seq(chan,
5805 control->txseq);
5806
5807 chan->buffer_seq = chan->expected_tx_seq;
5808 skb_in_use = 1;
5809
5810 err = l2cap_reassemble_sdu(chan, skb, control);
5811 if (err)
5812 break;
5813
5814 if (control->final) {
5815 if (!test_and_clear_bit(CONN_REJ_ACT,
5816 &chan->conn_state)) {
5817 control->final = 0;
5818 l2cap_retransmit_all(chan, control);
5819 l2cap_ertm_send(chan);
5820 }
5821 }
5822
5823 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5824 l2cap_send_ack(chan);
5825 break;
5826 case L2CAP_TXSEQ_UNEXPECTED:
5827 l2cap_pass_to_tx(chan, control);
5828
5829 /* Can't issue SREJ frames in the local busy state.
5830 * Drop this frame, it will be seen as missing
5831 * when local busy is exited.
5832 */
5833 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5834 BT_DBG("Busy, discarding unexpected seq %d",
5835 control->txseq);
5836 break;
5837 }
5838
5839 /* There was a gap in the sequence, so an SREJ
5840 * must be sent for each missing frame. The
5841 * current frame is stored for later use.
5842 */
5843 skb_queue_tail(&chan->srej_q, skb);
5844 skb_in_use = 1;
5845 BT_DBG("Queued %p (queue len %d)", skb,
5846 skb_queue_len(&chan->srej_q));
5847
5848 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5849 l2cap_seq_list_clear(&chan->srej_list);
5850 l2cap_send_srej(chan, control->txseq);
5851
5852 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5853 break;
5854 case L2CAP_TXSEQ_DUPLICATE:
5855 l2cap_pass_to_tx(chan, control);
5856 break;
5857 case L2CAP_TXSEQ_INVALID_IGNORE:
5858 break;
5859 case L2CAP_TXSEQ_INVALID:
5860 default:
5861 l2cap_send_disconn_req(chan, ECONNRESET);
5862 break;
5863 }
5864 break;
5865 case L2CAP_EV_RECV_RR:
5866 l2cap_pass_to_tx(chan, control);
5867 if (control->final) {
5868 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5869
5870 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5871 !__chan_is_moving(chan)) {
5872 control->final = 0;
5873 l2cap_retransmit_all(chan, control);
5874 }
5875
5876 l2cap_ertm_send(chan);
5877 } else if (control->poll) {
5878 l2cap_send_i_or_rr_or_rnr(chan);
5879 } else {
5880 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5881 &chan->conn_state) &&
5882 chan->unacked_frames)
5883 __set_retrans_timer(chan);
5884
5885 l2cap_ertm_send(chan);
5886 }
5887 break;
5888 case L2CAP_EV_RECV_RNR:
5889 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5890 l2cap_pass_to_tx(chan, control);
5891 if (control && control->poll) {
5892 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5893 l2cap_send_rr_or_rnr(chan, 0);
5894 }
5895 __clear_retrans_timer(chan);
5896 l2cap_seq_list_clear(&chan->retrans_list);
5897 break;
5898 case L2CAP_EV_RECV_REJ:
5899 l2cap_handle_rej(chan, control);
5900 break;
5901 case L2CAP_EV_RECV_SREJ:
5902 l2cap_handle_srej(chan, control);
5903 break;
5904 default:
5905 break;
5906 }
5907
5908 if (skb && !skb_in_use) {
5909 BT_DBG("Freeing %p", skb);
5910 kfree_skb(skb);
5911 }
5912
5913 return err;
5914 }
5915
5916 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5917 struct l2cap_ctrl *control,
5918 struct sk_buff *skb, u8 event)
5919 {
5920 int err = 0;
5921 u16 txseq = control->txseq;
5922 bool skb_in_use = 0;
5923
5924 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5925 event);
5926
5927 switch (event) {
5928 case L2CAP_EV_RECV_IFRAME:
5929 switch (l2cap_classify_txseq(chan, txseq)) {
5930 case L2CAP_TXSEQ_EXPECTED:
5931 /* Keep frame for reassembly later */
5932 l2cap_pass_to_tx(chan, control);
5933 skb_queue_tail(&chan->srej_q, skb);
5934 skb_in_use = 1;
5935 BT_DBG("Queued %p (queue len %d)", skb,
5936 skb_queue_len(&chan->srej_q));
5937
5938 chan->expected_tx_seq = __next_seq(chan, txseq);
5939 break;
5940 case L2CAP_TXSEQ_EXPECTED_SREJ:
5941 l2cap_seq_list_pop(&chan->srej_list);
5942
5943 l2cap_pass_to_tx(chan, control);
5944 skb_queue_tail(&chan->srej_q, skb);
5945 skb_in_use = 1;
5946 BT_DBG("Queued %p (queue len %d)", skb,
5947 skb_queue_len(&chan->srej_q));
5948
5949 err = l2cap_rx_queued_iframes(chan);
5950 if (err)
5951 break;
5952
5953 break;
5954 case L2CAP_TXSEQ_UNEXPECTED:
5955 /* Got a frame that can't be reassembled yet.
5956 * Save it for later, and send SREJs to cover
5957 * the missing frames.
5958 */
5959 skb_queue_tail(&chan->srej_q, skb);
5960 skb_in_use = 1;
5961 BT_DBG("Queued %p (queue len %d)", skb,
5962 skb_queue_len(&chan->srej_q));
5963
5964 l2cap_pass_to_tx(chan, control);
5965 l2cap_send_srej(chan, control->txseq);
5966 break;
5967 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5968 /* This frame was requested with an SREJ, but
5969 * some expected retransmitted frames are
5970 * missing. Request retransmission of missing
5971 * SREJ'd frames.
5972 */
5973 skb_queue_tail(&chan->srej_q, skb);
5974 skb_in_use = 1;
5975 BT_DBG("Queued %p (queue len %d)", skb,
5976 skb_queue_len(&chan->srej_q));
5977
5978 l2cap_pass_to_tx(chan, control);
5979 l2cap_send_srej_list(chan, control->txseq);
5980 break;
5981 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5982 /* We've already queued this frame. Drop this copy. */
5983 l2cap_pass_to_tx(chan, control);
5984 break;
5985 case L2CAP_TXSEQ_DUPLICATE:
5986 /* Expecting a later sequence number, so this frame
5987 * was already received. Ignore it completely.
5988 */
5989 break;
5990 case L2CAP_TXSEQ_INVALID_IGNORE:
5991 break;
5992 case L2CAP_TXSEQ_INVALID:
5993 default:
5994 l2cap_send_disconn_req(chan, ECONNRESET);
5995 break;
5996 }
5997 break;
5998 case L2CAP_EV_RECV_RR:
5999 l2cap_pass_to_tx(chan, control);
6000 if (control->final) {
6001 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6002
6003 if (!test_and_clear_bit(CONN_REJ_ACT,
6004 &chan->conn_state)) {
6005 control->final = 0;
6006 l2cap_retransmit_all(chan, control);
6007 }
6008
6009 l2cap_ertm_send(chan);
6010 } else if (control->poll) {
6011 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6012 &chan->conn_state) &&
6013 chan->unacked_frames) {
6014 __set_retrans_timer(chan);
6015 }
6016
6017 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6018 l2cap_send_srej_tail(chan);
6019 } else {
6020 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6021 &chan->conn_state) &&
6022 chan->unacked_frames)
6023 __set_retrans_timer(chan);
6024
6025 l2cap_send_ack(chan);
6026 }
6027 break;
6028 case L2CAP_EV_RECV_RNR:
6029 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6030 l2cap_pass_to_tx(chan, control);
6031 if (control->poll) {
6032 l2cap_send_srej_tail(chan);
6033 } else {
6034 struct l2cap_ctrl rr_control;
6035 memset(&rr_control, 0, sizeof(rr_control));
6036 rr_control.sframe = 1;
6037 rr_control.super = L2CAP_SUPER_RR;
6038 rr_control.reqseq = chan->buffer_seq;
6039 l2cap_send_sframe(chan, &rr_control);
6040 }
6041
6042 break;
6043 case L2CAP_EV_RECV_REJ:
6044 l2cap_handle_rej(chan, control);
6045 break;
6046 case L2CAP_EV_RECV_SREJ:
6047 l2cap_handle_srej(chan, control);
6048 break;
6049 }
6050
6051 if (skb && !skb_in_use) {
6052 BT_DBG("Freeing %p", skb);
6053 kfree_skb(skb);
6054 }
6055
6056 return err;
6057 }
6058
6059 static int l2cap_finish_move(struct l2cap_chan *chan)
6060 {
6061 BT_DBG("chan %p", chan);
6062
6063 chan->rx_state = L2CAP_RX_STATE_RECV;
6064
6065 if (chan->hs_hcon)
6066 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6067 else
6068 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6069
6070 return l2cap_resegment(chan);
6071 }
6072
6073 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6074 struct l2cap_ctrl *control,
6075 struct sk_buff *skb, u8 event)
6076 {
6077 int err;
6078
6079 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6080 event);
6081
6082 if (!control->poll)
6083 return -EPROTO;
6084
6085 l2cap_process_reqseq(chan, control->reqseq);
6086
6087 if (!skb_queue_empty(&chan->tx_q))
6088 chan->tx_send_head = skb_peek(&chan->tx_q);
6089 else
6090 chan->tx_send_head = NULL;
6091
6092 /* Rewind next_tx_seq to the point expected
6093 * by the receiver.
6094 */
6095 chan->next_tx_seq = control->reqseq;
6096 chan->unacked_frames = 0;
6097
6098 err = l2cap_finish_move(chan);
6099 if (err)
6100 return err;
6101
6102 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6103 l2cap_send_i_or_rr_or_rnr(chan);
6104
6105 if (event == L2CAP_EV_RECV_IFRAME)
6106 return -EPROTO;
6107
6108 return l2cap_rx_state_recv(chan, control, NULL, event);
6109 }
6110
6111 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6112 struct l2cap_ctrl *control,
6113 struct sk_buff *skb, u8 event)
6114 {
6115 int err;
6116
6117 if (!control->final)
6118 return -EPROTO;
6119
6120 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6121
6122 chan->rx_state = L2CAP_RX_STATE_RECV;
6123 l2cap_process_reqseq(chan, control->reqseq);
6124
6125 if (!skb_queue_empty(&chan->tx_q))
6126 chan->tx_send_head = skb_peek(&chan->tx_q);
6127 else
6128 chan->tx_send_head = NULL;
6129
6130 /* Rewind next_tx_seq to the point expected
6131 * by the receiver.
6132 */
6133 chan->next_tx_seq = control->reqseq;
6134 chan->unacked_frames = 0;
6135
6136 if (chan->hs_hcon)
6137 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6138 else
6139 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6140
6141 err = l2cap_resegment(chan);
6142
6143 if (!err)
6144 err = l2cap_rx_state_recv(chan, control, skb, event);
6145
6146 return err;
6147 }
6148
6149 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6150 {
6151 /* Make sure reqseq is for a packet that has been sent but not acked */
6152 u16 unacked;
6153
6154 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6155 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6156 }
6157
6158 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6159 struct sk_buff *skb, u8 event)
6160 {
6161 int err = 0;
6162
6163 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6164 control, skb, event, chan->rx_state);
6165
6166 if (__valid_reqseq(chan, control->reqseq)) {
6167 switch (chan->rx_state) {
6168 case L2CAP_RX_STATE_RECV:
6169 err = l2cap_rx_state_recv(chan, control, skb, event);
6170 break;
6171 case L2CAP_RX_STATE_SREJ_SENT:
6172 err = l2cap_rx_state_srej_sent(chan, control, skb,
6173 event);
6174 break;
6175 case L2CAP_RX_STATE_WAIT_P:
6176 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6177 break;
6178 case L2CAP_RX_STATE_WAIT_F:
6179 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6180 break;
6181 default:
6182 /* shut it down */
6183 break;
6184 }
6185 } else {
6186 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6187 control->reqseq, chan->next_tx_seq,
6188 chan->expected_ack_seq);
6189 l2cap_send_disconn_req(chan, ECONNRESET);
6190 }
6191
6192 return err;
6193 }
6194
6195 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6196 struct sk_buff *skb)
6197 {
6198 int err = 0;
6199
6200 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6201 chan->rx_state);
6202
6203 if (l2cap_classify_txseq(chan, control->txseq) ==
6204 L2CAP_TXSEQ_EXPECTED) {
6205 l2cap_pass_to_tx(chan, control);
6206
6207 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6208 __next_seq(chan, chan->buffer_seq));
6209
6210 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6211
6212 l2cap_reassemble_sdu(chan, skb, control);
6213 } else {
6214 if (chan->sdu) {
6215 kfree_skb(chan->sdu);
6216 chan->sdu = NULL;
6217 }
6218 chan->sdu_last_frag = NULL;
6219 chan->sdu_len = 0;
6220
6221 if (skb) {
6222 BT_DBG("Freeing %p", skb);
6223 kfree_skb(skb);
6224 }
6225 }
6226
6227 chan->last_acked_seq = control->txseq;
6228 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6229
6230 return err;
6231 }
6232
/* Common entry point for ERTM and streaming-mode data frames: unpack
 * and validate the control field, FCS and payload length, then route
 * I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx()
 * with the matching event.  Consumes skb on all paths; always
 * returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Compute the information payload length: a START segment
	 * carries an extra SDU-length field, and CRC16 mode appends an
	 * FCS trailer; neither counts against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload exceeding the negotiated MPS is a protocol violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no information payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6320
/* Deliver a data frame to the channel identified by its source CID,
 * creating an A2MP channel on demand for L2CAP_CID_A2MP.  Consumes skb
 * on all paths.
 *
 * NOTE(review): the unlock at "done" implies l2cap_get_chan_by_scid()
 * returns the channel locked (the A2MP path locks explicitly to
 * match) — confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() takes ownership of skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6379
6380 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6381 struct sk_buff *skb)
6382 {
6383 struct l2cap_chan *chan;
6384
6385 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6386 if (!chan)
6387 goto drop;
6388
6389 BT_DBG("chan %p, len %d", chan, skb->len);
6390
6391 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6392 goto drop;
6393
6394 if (chan->imtu < skb->len)
6395 goto drop;
6396
6397 if (!chan->ops->recv(chan, skb))
6398 return;
6399
6400 drop:
6401 kfree_skb(skb);
6402 }
6403
6404 static void l2cap_att_channel(struct l2cap_conn *conn,
6405 struct sk_buff *skb)
6406 {
6407 struct l2cap_chan *chan;
6408
6409 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6410 conn->src, conn->dst);
6411 if (!chan)
6412 goto drop;
6413
6414 BT_DBG("chan %p, len %d", chan, skb->len);
6415
6416 if (chan->imtu < skb->len)
6417 goto drop;
6418
6419 if (!chan->ops->recv(chan, skb))
6420 return;
6421
6422 drop:
6423 kfree_skb(skb);
6424 }
6425
6426 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6427 {
6428 struct l2cap_hdr *lh = (void *) skb->data;
6429 u16 cid, len;
6430 __le16 psm;
6431
6432 skb_pull(skb, L2CAP_HDR_SIZE);
6433 cid = __le16_to_cpu(lh->cid);
6434 len = __le16_to_cpu(lh->len);
6435
6436 if (len != skb->len) {
6437 kfree_skb(skb);
6438 return;
6439 }
6440
6441 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6442
6443 switch (cid) {
6444 case L2CAP_CID_LE_SIGNALING:
6445 l2cap_le_sig_channel(conn, skb);
6446 break;
6447 case L2CAP_CID_SIGNALING:
6448 l2cap_sig_channel(conn, skb);
6449 break;
6450
6451 case L2CAP_CID_CONN_LESS:
6452 psm = get_unaligned((__le16 *) skb->data);
6453 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6454 l2cap_conless_channel(conn, psm, skb);
6455 break;
6456
6457 case L2CAP_CID_ATT:
6458 l2cap_att_channel(conn, skb);
6459 break;
6460
6461 case L2CAP_CID_SMP:
6462 if (smp_sig_channel(conn, skb))
6463 l2cap_conn_del(conn->hcon, EACCES);
6464 break;
6465
6466 default:
6467 l2cap_data_channel(conn, cid, skb);
6468 break;
6469 }
6470 }
6471
6472 /* ---- L2CAP interface with lower layer (HCI) ---- */
6473
6474 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6475 {
6476 int exact = 0, lm1 = 0, lm2 = 0;
6477 struct l2cap_chan *c;
6478
6479 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6480
6481 /* Find listening sockets and check their link_mode */
6482 read_lock(&chan_list_lock);
6483 list_for_each_entry(c, &chan_list, global_l) {
6484 struct sock *sk = c->sk;
6485
6486 if (c->state != BT_LISTEN)
6487 continue;
6488
6489 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6490 lm1 |= HCI_LM_ACCEPT;
6491 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6492 lm1 |= HCI_LM_MASTER;
6493 exact++;
6494 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6495 lm2 |= HCI_LM_ACCEPT;
6496 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6497 lm2 |= HCI_LM_MASTER;
6498 }
6499 }
6500 read_unlock(&chan_list_lock);
6501
6502 return exact ? lm1 : lm2;
6503 }
6504
6505 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6506 {
6507 struct l2cap_conn *conn;
6508
6509 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6510
6511 if (!status) {
6512 conn = l2cap_conn_add(hcon);
6513 if (conn)
6514 l2cap_conn_ready(conn);
6515 } else {
6516 l2cap_conn_del(hcon, bt_to_errno(status));
6517 }
6518 }
6519
6520 int l2cap_disconn_ind(struct hci_conn *hcon)
6521 {
6522 struct l2cap_conn *conn = hcon->l2cap_data;
6523
6524 BT_DBG("hcon %p", hcon);
6525
6526 if (!conn)
6527 return HCI_ERROR_REMOTE_USER_TERM;
6528 return conn->disc_reason;
6529 }
6530
/* HCI callback: the ACL link went down; tear down the L2CAP
 * connection, translating the HCI reason code into an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6537
6538 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6539 {
6540 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6541 return;
6542
6543 if (encrypt == 0x00) {
6544 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6545 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6546 } else if (chan->sec_level == BT_SECURITY_HIGH)
6547 l2cap_chan_close(chan, ECONNREFUSED);
6548 } else {
6549 if (chan->sec_level == BT_SECURITY_MEDIUM)
6550 __clear_chan_timer(chan);
6551 }
6552 }
6553
/* HCI callback: authentication/encryption state changed on the link.
 * Walks every channel on the connection and advances its state machine
 * accordingly (LE/SMP key distribution, ATT readiness, resuming
 * pending connects, answering deferred connect requests).
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	/* On LE links, successful encryption triggers SMP key
	 * distribution and the security timer is no longer needed.
	 */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not subject to link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT channel becomes ready once the link is encrypted */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels still waiting on a pending connect response */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already-established channels: wake the socket and apply
		 * the encryption-change policy.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed while we were initiating:
			 * either proceed with the connect or start the
			 * disconnect timer on failure.
			 */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* We deferred answering an incoming connect until
			 * security completed; send the response now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6669
/* Entry point from HCI for inbound ACL data: reassembles fragmented
 * L2CAP frames (start + continuation fragments) into conn->rx_skb and
 * hands complete frames to l2cap_recv_frame().  Always returns 0.
 *
 * Ownership: l2cap_recv_frame() consumes the skb it is given; every
 * other path falls through to (or jumps to) "drop", which frees the
 * caller's skb — including the normal case where a fragment's data has
 * already been copied into rx_skb.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		/* Fall through to drop: the fragment was copied, so the
		 * original skb is freed below.
		 */
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
6770
6771 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6772 {
6773 struct l2cap_chan *c;
6774
6775 read_lock(&chan_list_lock);
6776
6777 list_for_each_entry(c, &chan_list, global_l) {
6778 struct sock *sk = c->sk;
6779
6780 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6781 &bt_sk(sk)->src, &bt_sk(sk)->dst,
6782 c->state, __le16_to_cpu(c->psm),
6783 c->scid, c->dcid, c->imtu, c->omtu,
6784 c->sec_level, c->mode);
6785 }
6786
6787 read_unlock(&chan_list_lock);
6788
6789 return 0;
6790 }
6791
/* debugfs open hook: bind the single-record seq_file show handler. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
6796
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs entry, kept so l2cap_exit() can remove it */
static struct dentry *l2cap_debugfs;
6805
6806 int __init l2cap_init(void)
6807 {
6808 int err;
6809
6810 err = l2cap_init_sockets();
6811 if (err < 0)
6812 return err;
6813
6814 if (bt_debugfs) {
6815 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6816 NULL, &l2cap_debugfs_fops);
6817 if (!l2cap_debugfs)
6818 BT_ERR("Failed to create L2CAP debug file");
6819 }
6820
6821 return 0;
6822 }
6823
/* Module teardown: remove the debugfs entry (debugfs_remove() is a
 * no-op on NULL, so a failed creation is harmless) and unregister the
 * socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6829
/* Allow disabling ERTM at module load (also writable via sysfs, 0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");