]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/l2cap_core.c
Bluetooth: Use connection address for reporting connection failures
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 #include "6lowpan.h"
44
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46
47 bool disable_ertm;
48
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
51
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54
55 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
56 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57
58 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
59 u8 code, u8 ident, u16 dlen, void *data);
60 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 void *data);
62 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
63 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64
65 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
66 struct sk_buff_head *skbs, u8 event);
67
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 {
70 if (hcon->type == LE_LINK) {
71 if (type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
73 else
74 return BDADDR_LE_RANDOM;
75 }
76
77 return BDADDR_BREDR;
78 }
79
80 /* ---- L2CAP channels ---- */
81
82 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
83 u16 cid)
84 {
85 struct l2cap_chan *c;
86
87 list_for_each_entry(c, &conn->chan_l, list) {
88 if (c->dcid == cid)
89 return c;
90 }
91 return NULL;
92 }
93
94 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
95 u16 cid)
96 {
97 struct l2cap_chan *c;
98
99 list_for_each_entry(c, &conn->chan_l, list) {
100 if (c->scid == cid)
101 return c;
102 }
103 return NULL;
104 }
105
106 /* Find channel with given SCID.
107 * Returns locked channel. */
108 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 u16 cid)
110 {
111 struct l2cap_chan *c;
112
113 mutex_lock(&conn->chan_lock);
114 c = __l2cap_get_chan_by_scid(conn, cid);
115 if (c)
116 l2cap_chan_lock(c);
117 mutex_unlock(&conn->chan_lock);
118
119 return c;
120 }
121
122 /* Find channel with given DCID.
123 * Returns locked channel.
124 */
125 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
126 u16 cid)
127 {
128 struct l2cap_chan *c;
129
130 mutex_lock(&conn->chan_lock);
131 c = __l2cap_get_chan_by_dcid(conn, cid);
132 if (c)
133 l2cap_chan_lock(c);
134 mutex_unlock(&conn->chan_lock);
135
136 return c;
137 }
138
139 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
140 u8 ident)
141 {
142 struct l2cap_chan *c;
143
144 list_for_each_entry(c, &conn->chan_l, list) {
145 if (c->ident == ident)
146 return c;
147 }
148 return NULL;
149 }
150
151 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
152 u8 ident)
153 {
154 struct l2cap_chan *c;
155
156 mutex_lock(&conn->chan_lock);
157 c = __l2cap_get_chan_by_ident(conn, ident);
158 if (c)
159 l2cap_chan_lock(c);
160 mutex_unlock(&conn->chan_lock);
161
162 return c;
163 }
164
165 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 {
167 struct l2cap_chan *c;
168
169 list_for_each_entry(c, &chan_list, global_l) {
170 if (c->sport == psm && !bacmp(&c->src, src))
171 return c;
172 }
173 return NULL;
174 }
175
176 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
177 {
178 int err;
179
180 write_lock(&chan_list_lock);
181
182 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 err = -EADDRINUSE;
184 goto done;
185 }
186
187 if (psm) {
188 chan->psm = psm;
189 chan->sport = psm;
190 err = 0;
191 } else {
192 u16 p;
193
194 err = -EINVAL;
195 for (p = 0x1001; p < 0x1100; p += 2)
196 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197 chan->psm = cpu_to_le16(p);
198 chan->sport = cpu_to_le16(p);
199 err = 0;
200 break;
201 }
202 }
203
204 done:
205 write_unlock(&chan_list_lock);
206 return err;
207 }
208
/* Assign a fixed source CID to a channel.  Takes chan_list_lock for
 * consistency with the other global channel-list updates; always
 * succeeds (returns 0).
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
219
/* Allocate the first free dynamic source CID on this connection, or
 * return 0 when the dynamic range is exhausted.  Caller must hold
 * conn->chan_lock (walks the channel list via
 * __l2cap_get_chan_by_scid()).
 *
 * NOTE(review): the loop uses "cid < dyn_end", so the last CID of the
 * dynamic range (dyn_end itself) is never handed out — confirm whether
 * this exclusion is intentional.
 */
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
236
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
238 {
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
241
242 chan->state = state;
243 chan->ops->state_change(chan, state, 0);
244 }
245
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 int state, int err)
248 {
249 chan->state = state;
250 chan->ops->state_change(chan, chan->state, err);
251 }
252
/* Report an error to the channel's owner without changing the channel
 * state; only the state_change callback sees the error code.
 */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
257
258 static void __set_retrans_timer(struct l2cap_chan *chan)
259 {
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
264 }
265 }
266
267 static void __set_monitor_timer(struct l2cap_chan *chan)
268 {
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
273 }
274 }
275
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
277 u16 seq)
278 {
279 struct sk_buff *skb;
280
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
283 return skb;
284 }
285
286 return NULL;
287 }
288
289 /* ---- L2CAP sequence number lists ---- */
290
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
297 * allocs or frees.
298 */
299
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301 {
302 size_t alloc_size, i;
303
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
307 */
308 alloc_size = roundup_pow_of_two(size);
309
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 if (!seq_list->list)
312 return -ENOMEM;
313
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319
320 return 0;
321 }
322
/* Release the backing array of a sequence list.  Safe on a list whose
 * init failed (kfree(NULL) is a no-op).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
327
/* Test whether @seq is currently linked into the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
334
/* Remove and return the sequence number at the head of the list.
 * Caller must ensure the list is non-empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry, then clear this slot
	 * so membership checks stay correct.
	 */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Popped the last entry: mark the list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
350
/* Empty the list: reset every slot plus the head/tail markers.
 * O(array size), but skipped entirely when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
364
/* Append @seq to the tail of the list.  Duplicate appends are
 * silently ignored (the slot already links to a successor).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Link from the previous tail, or start a new list */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
382
/* Delayed-work handler for the channel timer.  Closes the channel with
 * an error code chosen from the state the channel got stuck in.
 * Acquires conn->chan_lock before the channel lock (the lock order
 * used throughout this file) and drops the timer's channel reference
 * at the end.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close runs without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
412
413 struct l2cap_chan *l2cap_chan_create(void)
414 {
415 struct l2cap_chan *chan;
416
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
418 if (!chan)
419 return NULL;
420
421 mutex_init(&chan->lock);
422
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
426
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
428
429 chan->state = BT_OPEN;
430
431 kref_init(&chan->kref);
432
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
435
436 BT_DBG("chan %p", chan);
437
438 return chan;
439 }
440
441 static void l2cap_chan_destroy(struct kref *kref)
442 {
443 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
444
445 BT_DBG("chan %p", chan);
446
447 write_lock(&chan_list_lock);
448 list_del(&chan->global_l);
449 write_unlock(&chan_list_lock);
450
451 kfree(chan);
452 }
453
/* Take an additional reference on a channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
460
/* Drop a channel reference; frees the channel via l2cap_chan_destroy()
 * when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
467
468 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
469 {
470 chan->fcs = L2CAP_FCS_CRC16;
471 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->sec_level = BT_SECURITY_LOW;
476
477 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
478 }
479
480 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
481 {
482 chan->sdu = NULL;
483 chan->sdu_last_frag = NULL;
484 chan->sdu_len = 0;
485 chan->tx_credits = 0;
486 chan->rx_credits = le_max_credits;
487 chan->mps = min_t(u16, chan->imtu, le_default_mps);
488
489 skb_queue_head_init(&chan->tx_q);
490 }
491
/* Attach a channel to a connection: assign source/destination CIDs and
 * default MTU according to the channel type, seed the default EFS
 * parameters, take channel and hci_conn references, and link the
 * channel into conn->chan_l.  Caller must hold conn->chan_lock (see
 * l2cap_chan_add() for the locking wrapper).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* A new channel implies a clean (user-initiated) disconnect
	 * reason until told otherwise.
	 */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) extended flow spec parameters */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
540
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
547
/* Detach a channel from its connection and tear down its mode-specific
 * state.  Drops the list's channel reference and the hci_conn
 * reference taken in __l2cap_chan_add() (except for A2MP channels,
 * which never held one).  Caller is expected to hold the channel lock
 * and conn->chan_lock where applicable.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* A2MP channels do not hold a hci_conn reference */
		if (chan->scid != L2CAP_CID_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Per-mode queues/timers were never set up if configuration
	 * did not complete, so there is nothing more to release.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
611
/* Propagate the HCI connection's (possibly re-resolved) destination
 * address and address type to every channel on the connection.
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
628
629 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
630 {
631 struct l2cap_conn *conn = chan->conn;
632 struct l2cap_le_conn_rsp rsp;
633 u16 result;
634
635 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
636 result = L2CAP_CR_AUTHORIZATION;
637 else
638 result = L2CAP_CR_BAD_PSM;
639
640 l2cap_state_change(chan, BT_DISCONN);
641
642 rsp.dcid = cpu_to_le16(chan->scid);
643 rsp.mtu = cpu_to_le16(chan->imtu);
644 rsp.mps = cpu_to_le16(chan->mps);
645 rsp.credits = cpu_to_le16(chan->rx_credits);
646 rsp.result = cpu_to_le16(result);
647
648 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
649 &rsp);
650 }
651
652 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
653 {
654 struct l2cap_conn *conn = chan->conn;
655 struct l2cap_conn_rsp rsp;
656 u16 result;
657
658 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
659 result = L2CAP_CR_SEC_BLOCK;
660 else
661 result = L2CAP_CR_BAD_PSM;
662
663 l2cap_state_change(chan, BT_DISCONN);
664
665 rsp.scid = cpu_to_le16(chan->dcid);
666 rsp.dcid = cpu_to_le16(chan->scid);
667 rsp.result = cpu_to_le16(result);
668 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
669
670 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
671 }
672
/* Close a channel, choosing the correct shutdown path for its current
 * state: established channels send a Disconnect Request first,
 * half-open incoming channels send a rejection, everything else is
 * torn down locally.  Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to answer the disconnect
			 * request before forcing the channel down.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject it on
		 * the appropriate transport before deleting.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
714
/* Map the channel type and requested security level to the HCI
 * authentication requirement used for this link.  As a side effect,
 * SDP-related channels at BT_SECURITY_LOW are bumped to
 * BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
765
766 /* Service level security */
767 int l2cap_chan_check_security(struct l2cap_chan *chan)
768 {
769 struct l2cap_conn *conn = chan->conn;
770 __u8 auth_type;
771
772 if (conn->hcon->type == LE_LINK)
773 return smp_conn_security(conn->hcon, chan->sec_level);
774
775 auth_type = l2cap_get_auth_type(chan);
776
777 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
778 }
779
/* Allocate the next signalling identifier for this connection,
 * wrapping within the kernel's range under conn->lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
801
/* Build and transmit an L2CAP signalling command on the connection's
 * HCI channel.  Allocation failures are silently dropped (signalling
 * retransmission handles the loss).
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use a non-flushable packet boundary when the controller
	 * supports it.
	 */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic is high priority and forces active mode */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
823
824 static bool __chan_is_moving(struct l2cap_chan *chan)
825 {
826 return chan->move_state != L2CAP_MOVE_STABLE &&
827 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
828 }
829
/* Transmit a data skb for the channel, routing it over the high-speed
 * (AMP) channel when one exists and no move is in progress, otherwise
 * over the BR/EDR ACL link with the appropriate flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP link exists but its logical channel is
			 * gone: drop the frame.
			 */
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
856
/* Decode a 16-bit enhanced control field into the per-skb l2cap_ctrl
 * struct; fields not valid for the frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
880
/* Decode a 32-bit extended control field into the per-skb l2cap_ctrl
 * struct; fields not valid for the frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
904
905 static inline void __unpack_control(struct l2cap_chan *chan,
906 struct sk_buff *skb)
907 {
908 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
909 __unpack_extended_control(get_unaligned_le32(skb->data),
910 &bt_cb(skb)->control);
911 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
912 } else {
913 __unpack_enhanced_control(get_unaligned_le16(skb->data),
914 &bt_cb(skb)->control);
915 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
916 }
917 }
918
/* Encode an l2cap_ctrl struct into a 32-bit extended control field. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
937
/* Encode an l2cap_ctrl struct into a 16-bit enhanced control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
956
957 static inline void __pack_control(struct l2cap_chan *chan,
958 struct l2cap_ctrl *control,
959 struct sk_buff *skb)
960 {
961 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
962 put_unaligned_le32(__pack_extended_control(control),
963 skb->data + L2CAP_HDR_SIZE);
964 } else {
965 put_unaligned_le16(__pack_enhanced_control(control),
966 skb->data + L2CAP_HDR_SIZE);
967 }
968 }
969
970 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
971 {
972 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
973 return L2CAP_EXT_HDR_SIZE;
974 else
975 return L2CAP_ENH_HDR_SIZE;
976 }
977
/* Build a complete S-frame PDU (header, control field and optional
 * FCS) for the given packed control value.  Returns the skb or an
 * ERR_PTR on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width matches the negotiated control mode */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything built so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1010
/* Send an ERTM supervisory frame described by @control, updating the
 * channel's F-bit, RNR and ack bookkeeping along the way.  No-op for
 * non-S-frames or while an AMP move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR acknowledge up to reqseq, so the ack timer can be
	 * cancelled; SREJ does not acknowledge anything.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1051
1052 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1053 {
1054 struct l2cap_ctrl control;
1055
1056 BT_DBG("chan %p, poll %d", chan, poll);
1057
1058 memset(&control, 0, sizeof(control));
1059 control.sframe = 1;
1060 control.poll = poll;
1061
1062 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1063 control.super = L2CAP_SUPER_RNR;
1064 else
1065 control.super = L2CAP_SUPER_RR;
1066
1067 control.reqseq = chan->buffer_seq;
1068 l2cap_send_sframe(chan, &control);
1069 }
1070
/* True when no connect request is outstanding on this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1075
/* Decide whether this channel may be created on (or moved to) an AMP
 * controller: high speed must be enabled, the peer must support the
 * A2MP fixed channel, at least one non-BR/EDR AMP controller must be
 * up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1103
/* Check EFS parameters.
 * Stub: currently accepts any extended flow spec unconditionally.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	return true;
}
1109
1110 void l2cap_send_conn_req(struct l2cap_chan *chan)
1111 {
1112 struct l2cap_conn *conn = chan->conn;
1113 struct l2cap_conn_req req;
1114
1115 req.scid = cpu_to_le16(chan->scid);
1116 req.psm = chan->psm;
1117
1118 chan->ident = l2cap_get_ident(conn);
1119
1120 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1121
1122 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1123 }
1124
1125 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1126 {
1127 struct l2cap_create_chan_req req;
1128 req.scid = cpu_to_le16(chan->scid);
1129 req.psm = chan->psm;
1130 req.amp_id = amp_id;
1131
1132 chan->ident = l2cap_get_ident(chan->conn);
1133
1134 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1135 sizeof(req), &req);
1136 }
1137
/* Prepare an ERTM channel for an AMP channel move: stop all ERTM
 * timers, reset retransmission bookkeeping and SREJ state, and park
 * the tx/rx state machines until the move completes.  No-op for
 * non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already sent at least once get their retry count reset
	 * to 1; the walk stops at the first never-transmitted frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1172
1173 static void l2cap_move_done(struct l2cap_chan *chan)
1174 {
1175 u8 move_role = chan->move_role;
1176 BT_DBG("chan %p", chan);
1177
1178 chan->move_state = L2CAP_MOVE_STABLE;
1179 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1180
1181 if (chan->mode != L2CAP_MODE_ERTM)
1182 return;
1183
1184 switch (move_role) {
1185 case L2CAP_MOVE_ROLE_INITIATOR:
1186 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1187 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1188 break;
1189 case L2CAP_MOVE_ROLE_RESPONDER:
1190 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1191 break;
1192 }
1193 }
1194
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ready() callback.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* LE flow-control channels with no tx credits cannot send yet;
	 * suspend them until credits arrive.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1208
1209 static void l2cap_le_connect(struct l2cap_chan *chan)
1210 {
1211 struct l2cap_conn *conn = chan->conn;
1212 struct l2cap_le_conn_req req;
1213
1214 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1215 return;
1216
1217 req.psm = chan->psm;
1218 req.scid = cpu_to_le16(chan->scid);
1219 req.mtu = cpu_to_le16(chan->imtu);
1220 req.mps = cpu_to_le16(chan->mps);
1221 req.credits = cpu_to_le16(chan->rx_credits);
1222
1223 chan->ident = l2cap_get_ident(conn);
1224
1225 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1226 sizeof(req), &req);
1227 }
1228
1229 static void l2cap_le_start(struct l2cap_chan *chan)
1230 {
1231 struct l2cap_conn *conn = chan->conn;
1232
1233 if (!smp_conn_security(conn->hcon, chan->sec_level))
1234 return;
1235
1236 if (!chan->psm) {
1237 l2cap_chan_ready(chan);
1238 return;
1239 }
1240
1241 if (chan->state == BT_CONNECT)
1242 l2cap_le_connect(chan);
1243 }
1244
1245 static void l2cap_start_connection(struct l2cap_chan *chan)
1246 {
1247 if (__amp_capable(chan)) {
1248 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1249 a2mp_discover_amp(chan);
1250 } else if (chan->conn->hcon->type == LE_LINK) {
1251 l2cap_le_start(chan);
1252 } else {
1253 l2cap_send_conn_req(chan);
1254 }
1255 }
1256
/* Kick off establishment of a channel.
 *
 * On BR/EDR the information (feature mask) exchange must complete
 * before a connect request may be sent; if it has not been started
 * yet, start it here — l2cap_conn_start() retries the channel when
 * the response or the info timeout arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange still in flight; wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Give up on the exchange after L2CAP_INFO_TIMEOUT. */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1287
1288 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1289 {
1290 u32 local_feat_mask = l2cap_feat_mask;
1291 if (!disable_ertm)
1292 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1293
1294 switch (mode) {
1295 case L2CAP_MODE_ERTM:
1296 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1297 case L2CAP_MODE_STREAMING:
1298 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1299 default:
1300 return 0x00;
1301 }
1302 }
1303
/* Send a Disconnection Request for a channel and move it to
 * BT_DISCONN, recording err as the reason reported to the owner.
 * A2MP channels have no signalling channel of their own, so they only
 * change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop ERTM timers before tearing the channel down. */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1330
1331 /* ---- L2CAP connections ---- */
/* Drive setup of every connection-oriented channel on a connection
 * once security and the feature-mask exchange permit it.
 *
 * Channels in BT_CONNECT get a connect request (or are closed if they
 * insist on a mode the remote does not support); channels in
 * BT_CONNECT2 get their pending Connection Response completed,
 * followed by the first Configure Request.  Takes conn->chan_lock and
 * each channel lock in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels whose required mode the remote
			 * side does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner authorize first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only send the initial Configure Request once,
			 * and only after a successful response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1407
1408 /* Find socket with cid and source/destination bdaddr.
1409 * Returns closest match, locked.
1410 */
1411 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1412 bdaddr_t *src,
1413 bdaddr_t *dst)
1414 {
1415 struct l2cap_chan *c, *c1 = NULL;
1416
1417 read_lock(&chan_list_lock);
1418
1419 list_for_each_entry(c, &chan_list, global_l) {
1420 if (state && c->state != state)
1421 continue;
1422
1423 if (c->scid == cid) {
1424 int src_match, dst_match;
1425 int src_any, dst_any;
1426
1427 /* Exact match. */
1428 src_match = !bacmp(&c->src, src);
1429 dst_match = !bacmp(&c->dst, dst);
1430 if (src_match && dst_match) {
1431 read_unlock(&chan_list_lock);
1432 return c;
1433 }
1434
1435 /* Closest match */
1436 src_any = !bacmp(&c->src, BDADDR_ANY);
1437 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1438 if ((src_match && dst_any) || (src_any && dst_match) ||
1439 (src_any && dst_any))
1440 c1 = c;
1441 }
1442 }
1443
1444 read_unlock(&chan_list_lock);
1445
1446 return c1;
1447 }
1448
/* Handle a newly-ready LE link: register it with 6LoWPAN and, when an
 * ATT server socket is listening and no client ATT channel already
 * exists, spawn a new ATT channel for the connection.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	bt_6lowpan_add_conn(conn);

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	/* Ask the listening channel's owner for a child channel. */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1491
/* Called when the underlying link is fully established: start security
 * for outgoing LE pairing, bring up the LE fixed channels, and advance
 * every attached channel to its next state.  Finally release any RX
 * packets that were queued while the link was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	/* Process data received before the link was ready. */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1535
/* Notify sockets that we cannot guarantee reliability anymore */
1537 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1538 {
1539 struct l2cap_chan *chan;
1540
1541 BT_DBG("conn %p", conn);
1542
1543 mutex_lock(&conn->chan_lock);
1544
1545 list_for_each_entry(chan, &conn->chan_l, list) {
1546 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1547 l2cap_chan_set_err(chan, err);
1548 }
1549
1550 mutex_unlock(&conn->chan_lock);
1551 }
1552
/* Info request timed out: mark the feature-mask exchange as done so
 * pending channels are not blocked forever, then retry channel setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1563
1564 /*
1565 * l2cap_user
1566 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1567 * callback is called during registration. The ->remove callback is called
1568 * during unregistration.
 * An l2cap_user object is unregistered either explicitly, or implicitly when
 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1571 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1572 * External modules must own a reference to the l2cap_conn object if they intend
1573 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1574 * any time if they don't.
1575 */
1576
1577 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1578 {
1579 struct hci_dev *hdev = conn->hcon->hdev;
1580 int ret;
1581
1582 /* We need to check whether l2cap_conn is registered. If it is not, we
1583 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1584 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1585 * relies on the parent hci_conn object to be locked. This itself relies
1586 * on the hci_dev object to be locked. So we must lock the hci device
1587 * here, too. */
1588
1589 hci_dev_lock(hdev);
1590
1591 if (user->list.next || user->list.prev) {
1592 ret = -EINVAL;
1593 goto out_unlock;
1594 }
1595
1596 /* conn->hchan is NULL after l2cap_conn_del() was called */
1597 if (!conn->hchan) {
1598 ret = -ENODEV;
1599 goto out_unlock;
1600 }
1601
1602 ret = user->probe(conn, user);
1603 if (ret)
1604 goto out_unlock;
1605
1606 list_add(&user->list, &conn->users);
1607 ret = 0;
1608
1609 out_unlock:
1610 hci_dev_unlock(hdev);
1611 return ret;
1612 }
1613 EXPORT_SYMBOL(l2cap_register_user);
1614
1615 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1616 {
1617 struct hci_dev *hdev = conn->hcon->hdev;
1618
1619 hci_dev_lock(hdev);
1620
1621 if (!user->list.next || !user->list.prev)
1622 goto out_unlock;
1623
1624 list_del(&user->list);
1625 user->list.next = NULL;
1626 user->list.prev = NULL;
1627 user->remove(conn, user);
1628
1629 out_unlock:
1630 hci_dev_unlock(hdev);
1631 }
1632 EXPORT_SYMBOL(l2cap_unregister_user);
1633
1634 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1635 {
1636 struct l2cap_user *user;
1637
1638 while (!list_empty(&conn->users)) {
1639 user = list_first_entry(&conn->users, struct l2cap_user, list);
1640 list_del(&user->list);
1641 user->list.next = NULL;
1642 user->list.prev = NULL;
1643 user->remove(conn, user);
1644 }
1645 }
1646
/* Tear down an L2CAP connection: free buffered RX data, unregister
 * users, kill every channel, cancel pending work and finally drop the
 * reference that hcon->l2cap_data held on the conn.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);
	flush_work(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel outlives close(). */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* close() is invoked after dropping the channel lock. */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1695
1696 static void security_timeout(struct work_struct *work)
1697 {
1698 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1699 security_timer.work);
1700
1701 BT_DBG("conn %p", conn);
1702
1703 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1704 smp_chan_destroy(conn);
1705 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1706 }
1707 }
1708
/* kref release callback: drop the hci_conn reference held by the conn
 * and free it.  Only ever invoked via l2cap_conn_put().
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1716
/* Take a reference on the connection. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1722
/* Drop a reference; the last put frees the conn via l2cap_conn_free(). */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1728
1729 /* ---- Socket interface ---- */
1730
1731 /* Find socket with psm and source / destination bdaddr.
1732 * Returns closest match.
1733 */
1734 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1735 bdaddr_t *src,
1736 bdaddr_t *dst,
1737 u8 link_type)
1738 {
1739 struct l2cap_chan *c, *c1 = NULL;
1740
1741 read_lock(&chan_list_lock);
1742
1743 list_for_each_entry(c, &chan_list, global_l) {
1744 if (state && c->state != state)
1745 continue;
1746
1747 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1748 continue;
1749
1750 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1751 continue;
1752
1753 if (c->psm == psm) {
1754 int src_match, dst_match;
1755 int src_any, dst_any;
1756
1757 /* Exact match. */
1758 src_match = !bacmp(&c->src, src);
1759 dst_match = !bacmp(&c->dst, dst);
1760 if (src_match && dst_match) {
1761 read_unlock(&chan_list_lock);
1762 return c;
1763 }
1764
1765 /* Closest match */
1766 src_any = !bacmp(&c->src, BDADDR_ANY);
1767 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1768 if ((src_match && dst_any) || (src_any && dst_match) ||
1769 (src_any && dst_any))
1770 c1 = c;
1771 }
1772 }
1773
1774 read_unlock(&chan_list_lock);
1775
1776 return c1;
1777 }
1778
1779 static void l2cap_monitor_timeout(struct work_struct *work)
1780 {
1781 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1782 monitor_timer.work);
1783
1784 BT_DBG("chan %p", chan);
1785
1786 l2cap_chan_lock(chan);
1787
1788 if (!chan->conn) {
1789 l2cap_chan_unlock(chan);
1790 l2cap_chan_put(chan);
1791 return;
1792 }
1793
1794 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1795
1796 l2cap_chan_unlock(chan);
1797 l2cap_chan_put(chan);
1798 }
1799
1800 static void l2cap_retrans_timeout(struct work_struct *work)
1801 {
1802 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1803 retrans_timer.work);
1804
1805 BT_DBG("chan %p", chan);
1806
1807 l2cap_chan_lock(chan);
1808
1809 if (!chan->conn) {
1810 l2cap_chan_unlock(chan);
1811 l2cap_chan_put(chan);
1812 return;
1813 }
1814
1815 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1816 l2cap_chan_unlock(chan);
1817 l2cap_chan_put(chan);
1818 }
1819
/* Transmit a batch of streaming-mode I-frames: append them to tx_q,
 * then drain the queue, stamping each frame with the next tx sequence
 * number and an FCS when CRC16 is negotiated.  Streaming mode has no
 * retransmission, so frames are sent and forgotten.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Don't transmit while a channel move is in progress. */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges, so reqseq is 0. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1858
/* Transmit pending ERTM I-frames from tx_q, starting at tx_send_head,
 * as long as the remote transmit window allows and the tx state
 * machine is in XMIT.  Each transmitted frame piggybacks an
 * acknowledgment (reqseq) and stays queued for retransmission; a
 * clone is handed to the lower layer.  Returns the number of frames
 * sent, or -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote receiver busy: hold transmission. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Don't transmit while a channel move is in progress. */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an acknowledgment of received frames. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the frame itself stays in tx_q
		 * until acknowledged.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1928
/* Retransmit every sequence number currently queued on retrans_list.
 * Each frame's retry counter is bumped; exceeding max_tx (when
 * non-zero) disconnects the channel.  A writable copy/clone of the
 * queued frame is re-stamped with the current reqseq and F-bit before
 * being sent.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote receiver busy: hold retransmission. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* Don't transmit while a channel move is in progress. */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* Give up on the channel after too many retries. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and F-bit. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten control field. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2007
/* Queue a single sequence number (control->reqseq) for retransmission
 * and send it right away.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2016
/* Retransmit every unacknowledged frame starting from the one the
 * peer requested (control->reqseq) up to, but not including,
 * tx_send_head.  Used on receipt of REJ/poll frames.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands a final bit on the next frame we send. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the frame the peer asked for; stop early at
		 * tx_send_head since later frames were never sent.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to tx_send_head. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2050
/* Acknowledge received I-frames.  Sends RNR immediately when the
 * local side is busy; otherwise tries to piggyback the ack on
 * outgoing I-frames, sends an explicit RR once the ack backlog
 * reaches 3/4 of the ack window, and arms the ack timer for anything
 * still outstanding.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local side busy: tell the peer to stop sending. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer remaining acks until the ack timer fires. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2100
/* Copy len bytes of user data from msg into skb, filling the first
 * `count` bytes directly and chaining further HCI-MTU-sized buffers
 * onto skb's frag_list for the rest.  Returns the number of bytes
 * copied or a negative error (-EFAULT on a bad user buffer, or the
 * allocator's error).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		/* Each fragment is bounded by the HCI MTU. */
		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		/* Keep the parent skb's accounting in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2145
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from msg.  Returns the
 * skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* First buffer holds at most one HCI MTU including headers. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2180
2181 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2182 struct msghdr *msg, size_t len,
2183 u32 priority)
2184 {
2185 struct l2cap_conn *conn = chan->conn;
2186 struct sk_buff *skb;
2187 int err, count;
2188 struct l2cap_hdr *lh;
2189
2190 BT_DBG("chan %p len %zu", chan, len);
2191
2192 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2193
2194 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2195 msg->msg_flags & MSG_DONTWAIT);
2196 if (IS_ERR(skb))
2197 return skb;
2198
2199 skb->priority = priority;
2200
2201 /* Create L2CAP header */
2202 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2203 lh->cid = cpu_to_le16(chan->dcid);
2204 lh->len = cpu_to_le16(len);
2205
2206 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2207 if (unlikely(err < 0)) {
2208 kfree_skb(skb);
2209 return ERR_PTR(err);
2210 }
2211 return skb;
2212 }
2213
/* Build one ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field (filled in at transmit time), optional SDU length for
 * the first segment, and the payload.  FCS space is reserved via hlen
 * but appended later.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	/* The first segment of a segmented SDU carries its length. */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2267
/* Split an outgoing SDU into ERTM/streaming PDUs and queue them on
 * seg_queue.  The PDU size is derived from the HCI MTU, the remote
 * MPS and the worst-case L2CAP overhead; SAR markers
 * (UNSEGMENTED/START/CONTINUE/END) are set per segment.  Returns 0 or
 * a negative error, in which case seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one PDU: no SDU length field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments regain the SDU-length bytes. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2337
2338 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2339 struct msghdr *msg,
2340 size_t len, u16 sdulen)
2341 {
2342 struct l2cap_conn *conn = chan->conn;
2343 struct sk_buff *skb;
2344 int err, count, hlen;
2345 struct l2cap_hdr *lh;
2346
2347 BT_DBG("chan %p len %zu", chan, len);
2348
2349 if (!conn)
2350 return ERR_PTR(-ENOTCONN);
2351
2352 hlen = L2CAP_HDR_SIZE;
2353
2354 if (sdulen)
2355 hlen += L2CAP_SDULEN_SIZE;
2356
2357 count = min_t(unsigned int, (conn->mtu - hlen), len);
2358
2359 skb = chan->ops->alloc_skb(chan, count + hlen,
2360 msg->msg_flags & MSG_DONTWAIT);
2361 if (IS_ERR(skb))
2362 return skb;
2363
2364 /* Create L2CAP header */
2365 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2366 lh->cid = cpu_to_le16(chan->dcid);
2367 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2368
2369 if (sdulen)
2370 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2371
2372 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2373 if (unlikely(err < 0)) {
2374 kfree_skb(skb);
2375 return ERR_PTR(err);
2376 }
2377
2378 return skb;
2379 }
2380
/* Split an outgoing SDU into LE flow-control PDUs queued on
 * seg_queue.  The first PDU carries the total SDU length; PDU size is
 * bounded by the HCI MTU and the remote MPS.  Returns 0 or a negative
 * error, in which case seg_queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	/* First segment carries the total SDU length. */
	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later segments regain the SDU-length bytes. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2420
/* Entry point for transmitting user data on an L2CAP channel.
 *
 * Segments @msg according to the channel mode and hands the resulting
 * PDUs to the matching transmit machinery.  Returns the number of
 * bytes accepted (@len) on success or a negative errno.
 *
 * NOTE(review): assumes the caller holds the channel lock — confirm at
 * the call sites.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* No credits left: the peer must grant more first */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting;
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as we have credits for */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2537
2538 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2539 {
2540 struct l2cap_ctrl control;
2541 u16 seq;
2542
2543 BT_DBG("chan %p, txseq %u", chan, txseq);
2544
2545 memset(&control, 0, sizeof(control));
2546 control.sframe = 1;
2547 control.super = L2CAP_SUPER_SREJ;
2548
2549 for (seq = chan->expected_tx_seq; seq != txseq;
2550 seq = __next_seq(chan, seq)) {
2551 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2552 control.reqseq = seq;
2553 l2cap_send_sframe(chan, &control);
2554 l2cap_seq_list_append(&chan->srej_list, seq);
2555 }
2556 }
2557
2558 chan->expected_tx_seq = __next_seq(chan, txseq);
2559 }
2560
2561 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2562 {
2563 struct l2cap_ctrl control;
2564
2565 BT_DBG("chan %p", chan);
2566
2567 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2568 return;
2569
2570 memset(&control, 0, sizeof(control));
2571 control.sframe = 1;
2572 control.super = L2CAP_SUPER_SREJ;
2573 control.reqseq = chan->srej_list.tail;
2574 l2cap_send_sframe(chan, &control);
2575 }
2576
2577 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2578 {
2579 struct l2cap_ctrl control;
2580 u16 initial_head;
2581 u16 seq;
2582
2583 BT_DBG("chan %p, txseq %u", chan, txseq);
2584
2585 memset(&control, 0, sizeof(control));
2586 control.sframe = 1;
2587 control.super = L2CAP_SUPER_SREJ;
2588
2589 /* Capture initial list head to allow only one pass through the list. */
2590 initial_head = chan->srej_list.head;
2591
2592 do {
2593 seq = l2cap_seq_list_pop(&chan->srej_list);
2594 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2595 break;
2596
2597 control.reqseq = seq;
2598 l2cap_send_sframe(chan, &control);
2599 l2cap_seq_list_append(&chan->srej_list, seq);
2600 } while (chan->srej_list.head != initial_head);
2601 }
2602
2603 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2604 {
2605 struct sk_buff *acked_skb;
2606 u16 ackseq;
2607
2608 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2609
2610 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2611 return;
2612
2613 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2614 chan->expected_ack_seq, chan->unacked_frames);
2615
2616 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2617 ackseq = __next_seq(chan, ackseq)) {
2618
2619 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2620 if (acked_skb) {
2621 skb_unlink(acked_skb, &chan->tx_q);
2622 kfree_skb(acked_skb);
2623 chan->unacked_frames--;
2624 }
2625 }
2626
2627 chan->expected_ack_seq = reqseq;
2628
2629 if (chan->unacked_frames == 0)
2630 __clear_retrans_timer(chan);
2631
2632 BT_DBG("unacked_frames %u", chan->unacked_frames);
2633 }
2634
2635 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2636 {
2637 BT_DBG("chan %p", chan);
2638
2639 chan->expected_tx_seq = chan->buffer_seq;
2640 l2cap_seq_list_clear(&chan->srej_list);
2641 skb_queue_purge(&chan->srej_q);
2642 chan->rx_state = L2CAP_RX_STATE_RECV;
2643 }
2644
/* ERTM transmit state machine, XMIT state: frames may be transmitted
 * as soon as they are queued.
 *
 * @control: received control field for the RECV_* events (may be NULL
 *           for locally generated events)
 * @skbs:    pre-segmented frames, consumed on L2CAP_EV_DATA_REQUEST
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new frames and push them out immediately */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Acknowledge what we have received so far */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We previously reported busy (RNR): poll the
			 * peer with an RR and wait for its F-bit reply.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Send a poll and move to WAIT_F until the F-bit returns */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2716
2717 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2718 struct l2cap_ctrl *control,
2719 struct sk_buff_head *skbs, u8 event)
2720 {
2721 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2722 event);
2723
2724 switch (event) {
2725 case L2CAP_EV_DATA_REQUEST:
2726 if (chan->tx_send_head == NULL)
2727 chan->tx_send_head = skb_peek(skbs);
2728 /* Queue data, but don't send. */
2729 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2730 break;
2731 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2732 BT_DBG("Enter LOCAL_BUSY");
2733 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734
2735 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2736 /* The SREJ_SENT state must be aborted if we are to
2737 * enter the LOCAL_BUSY state.
2738 */
2739 l2cap_abort_rx_srej_sent(chan);
2740 }
2741
2742 l2cap_send_ack(chan);
2743
2744 break;
2745 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2746 BT_DBG("Exit LOCAL_BUSY");
2747 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2748
2749 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2750 struct l2cap_ctrl local_control;
2751 memset(&local_control, 0, sizeof(local_control));
2752 local_control.sframe = 1;
2753 local_control.super = L2CAP_SUPER_RR;
2754 local_control.poll = 1;
2755 local_control.reqseq = chan->buffer_seq;
2756 l2cap_send_sframe(chan, &local_control);
2757
2758 chan->retry_count = 1;
2759 __set_monitor_timer(chan);
2760 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2761 }
2762 break;
2763 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2764 l2cap_process_reqseq(chan, control->reqseq);
2765
2766 /* Fall through */
2767
2768 case L2CAP_EV_RECV_FBIT:
2769 if (control && control->final) {
2770 __clear_monitor_timer(chan);
2771 if (chan->unacked_frames > 0)
2772 __set_retrans_timer(chan);
2773 chan->retry_count = 0;
2774 chan->tx_state = L2CAP_TX_STATE_XMIT;
2775 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2776 }
2777 break;
2778 case L2CAP_EV_EXPLICIT_POLL:
2779 /* Ignore */
2780 break;
2781 case L2CAP_EV_MONITOR_TO:
2782 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2783 l2cap_send_rr_or_rnr(chan, 1);
2784 __set_monitor_timer(chan);
2785 chan->retry_count++;
2786 } else {
2787 l2cap_send_disconn_req(chan, ECONNABORTED);
2788 }
2789 break;
2790 default:
2791 break;
2792 }
2793 }
2794
2795 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2796 struct sk_buff_head *skbs, u8 event)
2797 {
2798 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2799 chan, control, skbs, event, chan->tx_state);
2800
2801 switch (chan->tx_state) {
2802 case L2CAP_TX_STATE_XMIT:
2803 l2cap_tx_state_xmit(chan, control, skbs, event);
2804 break;
2805 case L2CAP_TX_STATE_WAIT_F:
2806 l2cap_tx_state_wait_f(chan, control, skbs, event);
2807 break;
2808 default:
2809 /* Ignore event */
2810 break;
2811 }
2812 }
2813
2814 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2815 struct l2cap_ctrl *control)
2816 {
2817 BT_DBG("chan %p, control %p", chan, control);
2818 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2819 }
2820
2821 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2822 struct l2cap_ctrl *control)
2823 {
2824 BT_DBG("chan %p, control %p", chan, control);
2825 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2826 }
2827
2828 /* Copy frame to all raw sockets on that connection */
2829 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2830 {
2831 struct sk_buff *nskb;
2832 struct l2cap_chan *chan;
2833
2834 BT_DBG("conn %p", conn);
2835
2836 mutex_lock(&conn->chan_lock);
2837
2838 list_for_each_entry(chan, &conn->chan_l, list) {
2839 if (chan->chan_type != L2CAP_CHAN_RAW)
2840 continue;
2841
2842 /* Don't send frame to the channel it came from */
2843 if (bt_cb(skb)->chan == chan)
2844 continue;
2845
2846 nskb = skb_clone(skb, GFP_KERNEL);
2847 if (!nskb)
2848 continue;
2849 if (chan->ops->recv(chan, nskb))
2850 kfree_skb(nskb);
2851 }
2852
2853 mutex_unlock(&conn->chan_lock);
2854 }
2855
2856 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for an L2CAP signalling command.
 *
 * The head skb holds the L2CAP header, the command header and as much
 * of @data as fits within conn->mtu; any remainder is chained as plain
 * continuation skbs (no headers) on frag_list.
 *
 * Returns the skb, or NULL on allocation failure or an MTU too small
 * for even the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling travels on a fixed CID that differs between LE and
	 * BR/EDR links.
	 */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy whatever payload fits into the head skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far */
	kfree_skb(skb);
	return NULL;
}
2922
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * @type: option type byte (hint bit still included)
 * @olen: option payload length as claimed by the peer
 * @val:  1/2/4 byte options are returned by value (little endian);
 *        any other length is returned as a pointer into the buffer
 *
 * Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len comes straight from the peer and is not
 * checked here against the bytes remaining in the request — callers
 * must ensure the loop bound (len -= return value) and olen checks
 * keep reads inside the received buffer; upstream later hardened this
 * path. TODO confirm every caller validates olen before use.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2956
/* Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  1/2/4 byte values are stored little endian; any other
 * length is copied verbatim from the buffer @val points at.
 *
 * NOTE(review): there is no output-buffer bound here — each caller's
 * response buffer must be large enough for every option it appends.
 * Upstream later threaded an explicit size argument through this
 * helper (CVE-2017-1000251 hardening); verify callers cannot overrun.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is really a pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2986
2987 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2988 {
2989 struct l2cap_conf_efs efs;
2990
2991 switch (chan->mode) {
2992 case L2CAP_MODE_ERTM:
2993 efs.id = chan->local_id;
2994 efs.stype = chan->local_stype;
2995 efs.msdu = cpu_to_le16(chan->local_msdu);
2996 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2997 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2998 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2999 break;
3000
3001 case L2CAP_MODE_STREAMING:
3002 efs.id = 1;
3003 efs.stype = L2CAP_SERV_BESTEFFORT;
3004 efs.msdu = cpu_to_le16(chan->local_msdu);
3005 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3006 efs.acc_lat = 0;
3007 efs.flush_to = 0;
3008 break;
3009
3010 default:
3011 return;
3012 }
3013
3014 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3015 (unsigned long) &efs);
3016 }
3017
3018 static void l2cap_ack_timeout(struct work_struct *work)
3019 {
3020 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3021 ack_timer.work);
3022 u16 frames_to_ack;
3023
3024 BT_DBG("chan %p", chan);
3025
3026 l2cap_chan_lock(chan);
3027
3028 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3029 chan->last_acked_seq);
3030
3031 if (frames_to_ack)
3032 l2cap_send_rr_or_rnr(chan, 0);
3033
3034 l2cap_chan_unlock(chan);
3035 l2cap_chan_put(chan);
3036 }
3037
/* Reset all ERTM/streaming transmit and receive state for @chan.
 *
 * For ERTM channels this additionally initializes the three delayed
 * work timers and allocates the srej/retrans sequence lists.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation (both lists freed/never allocated on failure).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	/* No partially reassembled SDU */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Start on the BR/EDR controller with no AMP move in progress */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming/basic modes need none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list if the second fails */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3082
3083 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3084 {
3085 switch (mode) {
3086 case L2CAP_MODE_STREAMING:
3087 case L2CAP_MODE_ERTM:
3088 if (l2cap_mode_supported(mode, remote_feat_mask))
3089 return mode;
3090 /* fall through */
3091 default:
3092 return L2CAP_MODE_BASIC;
3093 }
3094 }
3095
3096 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3097 {
3098 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3099 }
3100
3101 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3102 {
3103 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3104 }
3105
3106 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3107 struct l2cap_conf_rfc *rfc)
3108 {
3109 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3110 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3111
3112 /* Class 1 devices have must have ERTM timeouts
3113 * exceeding the Link Supervision Timeout. The
3114 * default Link Supervision Timeout for AMP
3115 * controllers is 10 seconds.
3116 *
3117 * Class 1 devices use 0xffffffff for their
3118 * best-effort flush timeout, so the clamping logic
3119 * will result in a timeout that meets the above
3120 * requirement. ERTM timeouts are 16-bit values, so
3121 * the maximum timeout is 65.535 seconds.
3122 */
3123
3124 /* Convert timeout to milliseconds and round */
3125 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3126
3127 /* This is the recommended formula for class 2 devices
3128 * that start ERTM timers when packets are sent to the
3129 * controller.
3130 */
3131 ertm_to = 3 * ertm_to + 500;
3132
3133 if (ertm_to > 0xffff)
3134 ertm_to = 0xffff;
3135
3136 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3137 rfc->monitor_timeout = rfc->retrans_timeout;
3138 } else {
3139 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3140 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3141 }
3142 }
3143
3144 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3145 {
3146 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3147 __l2cap_ews_supported(chan->conn)) {
3148 /* use extended control field */
3149 set_bit(FLAG_EXT_CTRL, &chan->flags);
3150 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3151 } else {
3152 chan->tx_win = min_t(u16, chan->tx_win,
3153 L2CAP_DEFAULT_TX_WINDOW);
3154 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3155 }
3156 chan->ack_win = chan->tx_win;
3157 }
3158
/* Build our outgoing Configure Request for @chan into @data.
 *
 * On the very first request this also finalizes the channel mode,
 * falling back via l2cap_select_mode() when the peer lacks
 * ERTM/streaming support.  Returns the number of bytes written.
 *
 * NOTE(review): options are appended without an explicit bound on the
 * caller's buffer; upstream later added a buffer-size argument to the
 * option helpers — confirm callers size @data generously enough.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)selected on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise the MTU when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only useful when
		 * the peer supports ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the extended header, SDU
		 * length field and FCS within the ACL MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Extended windows don't fit in the RFC option itself */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3276
/* Parse the peer's Configure Request (stored in chan->conf_req) and
 * build our Configure Response into @data.
 *
 * Returns the response length, or -ECONNREFUSED when the request is
 * fundamentally unacceptable (mode mismatch, EFS refusal, or EWS
 * without high speed support).
 *
 * Fix vs. original: @efs is zero-initialized and remote_efs is only
 * set when the EFS option has exactly the expected size, so a
 * truncated or oversized EFS option (or an absent one while
 * FLAG_EFS_ENABLE is already set) can no longer cause reads of
 * uninitialized stack bytes that get echoed back to the peer.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Deterministic contents even if no (valid) EFS option arrives */
	memset(&efs, 0, sizeof(efs));

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only honour a well-formed EFS option */
			if (olen == sizeof(efs)) {
				remote_efs = 1;
				memcpy(&efs, (void *) val, olen);
			}
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: reject it by type */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only negotiated on the first request/response pair */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's MPS to what fits in our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3490
3491 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3492 void *data, u16 *result)
3493 {
3494 struct l2cap_conf_req *req = data;
3495 void *ptr = req->data;
3496 int type, olen;
3497 unsigned long val;
3498 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3499 struct l2cap_conf_efs efs;
3500
3501 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3502
3503 while (len >= L2CAP_CONF_OPT_SIZE) {
3504 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3505
3506 switch (type) {
3507 case L2CAP_CONF_MTU:
3508 if (val < L2CAP_DEFAULT_MIN_MTU) {
3509 *result = L2CAP_CONF_UNACCEPT;
3510 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3511 } else
3512 chan->imtu = val;
3513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3514 break;
3515
3516 case L2CAP_CONF_FLUSH_TO:
3517 chan->flush_to = val;
3518 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3519 2, chan->flush_to);
3520 break;
3521
3522 case L2CAP_CONF_RFC:
3523 if (olen == sizeof(rfc))
3524 memcpy(&rfc, (void *)val, olen);
3525
3526 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3527 rfc.mode != chan->mode)
3528 return -ECONNREFUSED;
3529
3530 chan->fcs = 0;
3531
3532 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3533 sizeof(rfc), (unsigned long) &rfc);
3534 break;
3535
3536 case L2CAP_CONF_EWS:
3537 chan->ack_win = min_t(u16, val, chan->ack_win);
3538 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3539 chan->tx_win);
3540 break;
3541
3542 case L2CAP_CONF_EFS:
3543 if (olen == sizeof(efs))
3544 memcpy(&efs, (void *)val, olen);
3545
3546 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3547 efs.stype != L2CAP_SERV_NOTRAFIC &&
3548 efs.stype != chan->local_stype)
3549 return -ECONNREFUSED;
3550
3551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3552 (unsigned long) &efs);
3553 break;
3554
3555 case L2CAP_CONF_FCS:
3556 if (*result == L2CAP_CONF_PENDING)
3557 if (val == L2CAP_FCS_NONE)
3558 set_bit(CONF_RECV_NO_FCS,
3559 &chan->conf_state);
3560 break;
3561 }
3562 }
3563
3564 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3565 return -ECONNREFUSED;
3566
3567 chan->mode = rfc.mode;
3568
3569 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3570 switch (rfc.mode) {
3571 case L2CAP_MODE_ERTM:
3572 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3573 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3574 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3575 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3576 chan->ack_win = min_t(u16, chan->ack_win,
3577 rfc.txwin_size);
3578
3579 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3580 chan->local_msdu = le16_to_cpu(efs.msdu);
3581 chan->local_sdu_itime =
3582 le32_to_cpu(efs.sdu_itime);
3583 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3584 chan->local_flush_to =
3585 le32_to_cpu(efs.flush_to);
3586 }
3587 break;
3588
3589 case L2CAP_MODE_STREAMING:
3590 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3591 }
3592 }
3593
3594 req->dcid = cpu_to_le16(chan->dcid);
3595 req->flags = __constant_cpu_to_le16(0);
3596
3597 return ptr - data;
3598 }
3599
3600 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3601 u16 result, u16 flags)
3602 {
3603 struct l2cap_conf_rsp *rsp = data;
3604 void *ptr = rsp->data;
3605
3606 BT_DBG("chan %p", chan);
3607
3608 rsp->scid = cpu_to_le16(chan->dcid);
3609 rsp->result = cpu_to_le16(result);
3610 rsp->flags = cpu_to_le16(flags);
3611
3612 return ptr - data;
3613 }
3614
3615 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3616 {
3617 struct l2cap_le_conn_rsp rsp;
3618 struct l2cap_conn *conn = chan->conn;
3619
3620 BT_DBG("chan %p", chan);
3621
3622 rsp.dcid = cpu_to_le16(chan->scid);
3623 rsp.mtu = cpu_to_le16(chan->imtu);
3624 rsp.mps = cpu_to_le16(chan->mps);
3625 rsp.credits = cpu_to_le16(chan->rx_credits);
3626 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3627
3628 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3629 &rsp);
3630 }
3631
3632 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3633 {
3634 struct l2cap_conn_rsp rsp;
3635 struct l2cap_conn *conn = chan->conn;
3636 u8 buf[128];
3637 u8 rsp_code;
3638
3639 rsp.scid = cpu_to_le16(chan->dcid);
3640 rsp.dcid = cpu_to_le16(chan->scid);
3641 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3642 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3643
3644 if (chan->hs_hcon)
3645 rsp_code = L2CAP_CREATE_CHAN_RSP;
3646 else
3647 rsp_code = L2CAP_CONN_RSP;
3648
3649 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3650
3651 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3652
3653 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3654 return;
3655
3656 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3657 l2cap_build_conf_req(chan, buf), buf);
3658 chan->num_conf_req++;
3659 }
3660
3661 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3662 {
3663 int type, olen;
3664 unsigned long val;
3665 /* Use sane default values in case a misbehaving remote device
3666 * did not send an RFC or extended window size option.
3667 */
3668 u16 txwin_ext = chan->ack_win;
3669 struct l2cap_conf_rfc rfc = {
3670 .mode = chan->mode,
3671 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3672 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3673 .max_pdu_size = cpu_to_le16(chan->imtu),
3674 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3675 };
3676
3677 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3678
3679 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3680 return;
3681
3682 while (len >= L2CAP_CONF_OPT_SIZE) {
3683 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3684
3685 switch (type) {
3686 case L2CAP_CONF_RFC:
3687 if (olen == sizeof(rfc))
3688 memcpy(&rfc, (void *)val, olen);
3689 break;
3690 case L2CAP_CONF_EWS:
3691 txwin_ext = val;
3692 break;
3693 }
3694 }
3695
3696 switch (rfc.mode) {
3697 case L2CAP_MODE_ERTM:
3698 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3699 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3700 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3701 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3702 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3703 else
3704 chan->ack_win = min_t(u16, chan->ack_win,
3705 rfc.txwin_size);
3706 break;
3707 case L2CAP_MODE_STREAMING:
3708 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3709 }
3710 }
3711
3712 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3713 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3714 u8 *data)
3715 {
3716 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3717
3718 if (cmd_len < sizeof(*rej))
3719 return -EPROTO;
3720
3721 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3722 return 0;
3723
3724 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3725 cmd->ident == conn->info_ident) {
3726 cancel_delayed_work(&conn->info_timer);
3727
3728 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3729 conn->info_ident = 0;
3730
3731 l2cap_conn_start(conn);
3732 }
3733
3734 return 0;
3735 }
3736
/* Core handler for an incoming Connection Request (rsp_code ==
 * L2CAP_CONN_RSP) or AMP Create Channel Request (rsp_code ==
 * L2CAP_CREATE_CHAN_RSP, amp_id identifying the controller).
 *
 * Looks up a listening channel for the requested PSM, checks link
 * security, allocates a new channel and sends the response.  Returns the
 * new channel (NULL on any rejection) so the Create Channel path can
 * attach the high-speed link afterwards.
 *
 * Locking: takes conn->chan_lock and the parent channel lock; drops both
 * before sending the response.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Inherit addresses from the underlying ACL link. */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* __l2cap_chan_add() assigned our source CID for the response. */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must accept; answer PENDING with
				 * authorization pending status.
				 */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade in progress. */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still outstanding. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* If we answered PEND/NO_INFO we still owe the peer a feature mask
	 * exchange; start it now with its own timeout.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, begin configuration right away. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3870
/* Signaling entry point for a BR/EDR Connection Request.
 *
 * Notifies the management interface of the (possibly new) device
 * connection exactly once per ACL, then delegates the actual channel
 * setup to l2cap_connect().  Returns 0 or -EPROTO on a truncated PDU.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* HCI_CONN_MGMT_CONNECTED guards against duplicate notifications. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	/* amp_id 0 == BR/EDR; respond with a plain Connection Response. */
	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3891
/* Handle a Connection Response or Create Channel Response from the peer.
 *
 * Matches the response to our channel either by the source CID (scid)
 * the peer echoed back, or - when scid is 0, e.g. on rejection - by the
 * ident of the request we sent.  On success the channel moves to
 * BT_CONFIG and configuration begins; on a pending result we just mark
 * the channel; any other result tears the channel down.
 *
 * Locking: conn->chan_lock is held across the lookup and the channel
 * lock across the state change, so the channel cannot vanish under us.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* Rejections may carry scid 0; fall back to the ident. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configure Request once. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal; drop the channel. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3964
3965 static inline void set_default_fcs(struct l2cap_chan *chan)
3966 {
3967 /* FCS is enabled only in ERTM or streaming mode, if one or both
3968 * sides request it.
3969 */
3970 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3971 chan->fcs = L2CAP_FCS_NONE;
3972 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3973 chan->fcs = L2CAP_FCS_CRC16;
3974 }
3975
3976 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3977 u8 ident, u16 flags)
3978 {
3979 struct l2cap_conn *conn = chan->conn;
3980
3981 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3982 flags);
3983
3984 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3985 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3986
3987 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3988 l2cap_build_conf_rsp(chan, data,
3989 L2CAP_CONF_SUCCESS, flags), data);
3990 }
3991
3992 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3993 u16 scid, u16 dcid)
3994 {
3995 struct l2cap_cmd_rej_cid rej;
3996
3997 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3998 rej.scid = __cpu_to_le16(scid);
3999 rej.dcid = __cpu_to_le16(dcid);
4000
4001 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4002 }
4003
/* Handle an incoming Configure Request.
 *
 * Requests may arrive fragmented (continuation flag set); fragments are
 * accumulated in chan->conf_req until the final one arrives, then parsed
 * as a whole.  Once both directions of configuration are done the
 * channel is initialized for its mode and marked ready.
 *
 * Locking: l2cap_get_chan_by_scid() returns the channel locked; it is
 * unlocked at the common exit.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unacceptable/unparsable options: abort the channel. */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize FCS and mode setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet; send it now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP channel: response deferred until the logical
			 * link completes; remember the ident for later.
			 */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4112
/* Handle an incoming Configure Response.
 *
 * SUCCESS applies the negotiated RFC/EWS values; PENDING defers until
 * both sides are pending (EFS/AMP flow); UNACCEPT renegotiates with the
 * peer's suggested values, up to L2CAP_CONF_MAX_CONF_RSP attempts; any
 * other result disconnects the channel.
 *
 * Locking: l2cap_get_chan_by_scid() returns the channel locked; it is
 * unlocked at "done".
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: confirm once the logical link exists;
				 * stash the ident for the deferred response.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - retry budget exhausted, treat as failure */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* Wait for the final (non-continuation) fragment. */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finalize FCS and mode setup. */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4224
/* Handle an incoming Disconnection Request: acknowledge it and tear the
 * channel down.
 *
 * Note the CID swap: the peer's dcid names our scid.  The channel is
 * held across l2cap_chan_del() so that ops->close() can still be called
 * after the channel has been unlinked; the final put releases it.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Acknowledge before tearing down our side. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a ref so the channel survives until close() is done. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4271
4272 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4274 u8 *data)
4275 {
4276 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4277 u16 dcid, scid;
4278 struct l2cap_chan *chan;
4279
4280 if (cmd_len != sizeof(*rsp))
4281 return -EPROTO;
4282
4283 scid = __le16_to_cpu(rsp->scid);
4284 dcid = __le16_to_cpu(rsp->dcid);
4285
4286 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4287
4288 mutex_lock(&conn->chan_lock);
4289
4290 chan = __l2cap_get_chan_by_scid(conn, scid);
4291 if (!chan) {
4292 mutex_unlock(&conn->chan_lock);
4293 return 0;
4294 }
4295
4296 l2cap_chan_lock(chan);
4297
4298 l2cap_chan_hold(chan);
4299 l2cap_chan_del(chan, 0);
4300
4301 l2cap_chan_unlock(chan);
4302
4303 chan->ops->close(chan);
4304 l2cap_chan_put(chan);
4305
4306 mutex_unlock(&conn->chan_lock);
4307
4308 return 0;
4309 }
4310
/* Handle an incoming Information Request.
 *
 * Answers the two supported queries (feature mask, fixed-channel map)
 * and rejects everything else with NOTSUPP.  The advertised features
 * depend on runtime configuration (ERTM disablement, high-speed support).
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this mutates the global l2cap_fixed_chan
		 * table based on this connection's hs_enabled flag without
		 * any visible locking - presumably serialized by the
		 * signaling workqueue; confirm.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		/* Unknown information type. */
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4365
/* Handle an incoming Information Response.
 *
 * Drives the two-step capability probe: a feature-mask response that
 * advertises fixed channels triggers a follow-up fixed-channel query;
 * otherwise (or after the fixed-channel response) the probe is marked
 * done and pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query; proceed without its features. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by querying the fixed-channel map. */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4428
/* Handle an incoming AMP Create Channel Request.
 *
 * amp_id 0 (AMP_ID_BREDR) falls back to a plain BR/EDR connect; any
 * other id must name a powered-up local AMP controller, whose existing
 * AMP link to the peer is then attached to the new channel.  Invalid
 * controller ids are answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link to the peer must already exist. */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Attach the high-speed link; FCS is never used on AMP. */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4505
4506 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4507 {
4508 struct l2cap_move_chan_req req;
4509 u8 ident;
4510
4511 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4512
4513 ident = l2cap_get_ident(chan->conn);
4514 chan->ident = ident;
4515
4516 req.icid = cpu_to_le16(chan->scid);
4517 req.dest_amp_id = dest_amp_id;
4518
4519 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4520 &req);
4521
4522 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4523 }
4524
4525 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4526 {
4527 struct l2cap_move_chan_rsp rsp;
4528
4529 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4530
4531 rsp.icid = cpu_to_le16(chan->dcid);
4532 rsp.result = cpu_to_le16(result);
4533
4534 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4535 sizeof(rsp), &rsp);
4536 }
4537
4538 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4539 {
4540 struct l2cap_move_chan_cfm cfm;
4541
4542 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4543
4544 chan->ident = l2cap_get_ident(chan->conn);
4545
4546 cfm.icid = cpu_to_le16(chan->scid);
4547 cfm.result = cpu_to_le16(result);
4548
4549 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4550 sizeof(cfm), &cfm);
4551
4552 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4553 }
4554
4555 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4556 {
4557 struct l2cap_move_chan_cfm cfm;
4558
4559 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4560
4561 cfm.icid = cpu_to_le16(icid);
4562 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4563
4564 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4565 sizeof(cfm), &cfm);
4566 }
4567
4568 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4569 u16 icid)
4570 {
4571 struct l2cap_move_chan_cfm_rsp rsp;
4572
4573 BT_DBG("icid 0x%4.4x", icid);
4574
4575 rsp.icid = cpu_to_le16(icid);
4576 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4577 }
4578
4579 static void __release_logical_link(struct l2cap_chan *chan)
4580 {
4581 chan->hs_hchan = NULL;
4582 chan->hs_hcon = NULL;
4583
4584 /* Placeholder - release the logical link */
4585 }
4586
/* React to a failed AMP logical link setup for @chan.
 *
 * If the channel was still being created, the whole channel is torn
 * down.  For an established channel being moved, the move is unwound
 * according to our role: a responder refuses the move, an initiator
 * cleans up its move state (when the remote hadn't failed it already)
 * and sends an unconfirmed Move Confirm.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4617
/* Complete channel creation once the AMP logical link @hchan is up.
 *
 * Sends the Configure Response that was deferred while the link was
 * being brought up (chan->ident was stashed for this), and if the
 * peer's configuration already finished, initializes ERTM and marks
 * the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4640
/* Advance the channel-move state machine once the AMP logical link is
 * up for an established channel being moved.
 *
 * The next step depends on what the move was waiting for: either the
 * peer's Move Response is still outstanding, or - when local traffic is
 * quiesced - the initiator confirms / the responder reports success.
 * Any unexpected state releases the logical link and stabilizes.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Wait until local receive processing drains. */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4674
/* Call with chan locked */
/* Confirm callback for AMP logical link setup on @chan.
 *
 * A non-zero @status means the link failed: unwind via
 * l2cap_logical_fail() and drop the link references.  On success,
 * dispatch to the create path (channel not yet connected) or the move
 * path (established channel); a BR/EDR channel ignores a stray create
 * confirmation.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4695
/* Initiate moving @chan between controllers.
 *
 * From BR/EDR: only proceed if policy prefers AMP, then wait for the
 * physical link to be prepared.  From an AMP controller: move straight
 * back to BR/EDR (move_id 0) by sending a Move Channel Request.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4714
/* Continue channel creation on an AMP controller after physical link
 * setup finished with @result.
 *
 * Outgoing channels either send the Create Channel Request (success) or
 * fall back to a plain BR/EDR Connection Request.  Incoming channels
 * answer the peer's deferred Create Channel Request and, on success,
 * enter configuration.  FCS is always disabled on AMP channels.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move to configuration immediately. */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4766
4767 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4768 u8 remote_amp_id)
4769 {
4770 l2cap_move_setup(chan);
4771 chan->move_id = local_amp_id;
4772 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4773
4774 l2cap_send_move_chan_req(chan, remote_amp_id);
4775 }
4776
4777 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4778 {
4779 struct hci_chan *hchan = NULL;
4780
4781 /* Placeholder - get hci_chan for logical link */
4782
4783 if (hchan) {
4784 if (hchan->state == BT_CONNECTED) {
4785 /* Logical link is ready to go */
4786 chan->hs_hcon = hchan->conn;
4787 chan->hs_hcon->l2cap_data = chan->conn;
4788 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4789 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4790
4791 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4792 } else {
4793 /* Wait for logical link to be ready */
4794 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4795 }
4796 } else {
4797 /* Logical link not available */
4798 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4799 }
4800 }
4801
4802 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4803 {
4804 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4805 u8 rsp_result;
4806 if (result == -EINVAL)
4807 rsp_result = L2CAP_MR_BAD_ID;
4808 else
4809 rsp_result = L2CAP_MR_NOT_ALLOWED;
4810
4811 l2cap_send_move_chan_rsp(chan, rsp_result);
4812 }
4813
4814 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4815 chan->move_state = L2CAP_MOVE_STABLE;
4816
4817 /* Restart data transmission */
4818 l2cap_ertm_send(chan);
4819 }
4820
/* Completion handler for an AMP physical link request.
 *
 * Invoke with locked chan.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED early-exit path drops the
 * channel lock here, while every other path returns with the lock
 * still held - confirm callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is going away - nothing left to do for the move */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel still being created on the AMP */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		/* Physical link failed - cancel the move */
		l2cap_do_move_cancel(chan, result);
	} else {
		/* Physical link up - continue per our role in the move */
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4854
/* Handle an incoming Move Channel Request.
 *
 * Validates the request (high-speed support, channel mode, target
 * controller), detects move collisions, and replies with a Move
 * Channel Response.  l2cap_get_chan_by_dcid() returns the channel
 * locked; every path below that point unlocks it before returning.
 *
 * Returns 0 on success, -EPROTO/-EINVAL to trigger a command reject.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Reject outright if high speed is disabled on this connection */
	if (!conn->hs_enabled)
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reply directly, nothing to unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the ident for the later Move Channel Confirm exchange */
	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may be moved, and only if
	 * policy does not pin them to BR/EDR.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Target must be an AMP controller that is present and up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4952
/* Advance the initiator-side move state machine after a successful or
 * pending Move Channel Response.  l2cap_get_chan_by_scid() returns the
 * channel locked; it is unlocked before returning.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel gone - confirm with just the icid so the
		 * peer's state machine can terminate.
		 */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	/* A pending result re-arms the extended response timer */
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5042
5043 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5044 u16 result)
5045 {
5046 struct l2cap_chan *chan;
5047
5048 chan = l2cap_get_chan_by_ident(conn, ident);
5049 if (!chan) {
5050 /* Could not locate channel, icid is best guess */
5051 l2cap_send_move_chan_cfm_icid(conn, icid);
5052 return;
5053 }
5054
5055 __clear_chan_timer(chan);
5056
5057 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5058 if (result == L2CAP_MR_COLLISION) {
5059 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5060 } else {
5061 /* Cleanup - cancel move */
5062 chan->move_id = chan->local_amp_id;
5063 l2cap_move_done(chan);
5064 }
5065 }
5066
5067 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5068
5069 l2cap_chan_unlock(chan);
5070 }
5071
5072 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5073 struct l2cap_cmd_hdr *cmd,
5074 u16 cmd_len, void *data)
5075 {
5076 struct l2cap_move_chan_rsp *rsp = data;
5077 u16 icid, result;
5078
5079 if (cmd_len != sizeof(*rsp))
5080 return -EPROTO;
5081
5082 icid = le16_to_cpu(rsp->icid);
5083 result = le16_to_cpu(rsp->result);
5084
5085 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5086
5087 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5088 l2cap_move_continue(conn, icid, result);
5089 else
5090 l2cap_move_fail(conn, cmd->ident, icid, result);
5091
5092 return 0;
5093 }
5094
/* Handle an incoming Move Channel Confirm (responder side).  Commits
 * or reverts the pending controller switch and always replies with a
 * Move Channel Confirm Response, as the spec requires even for an
 * unknown icid.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returned channel is locked; unlocked before returning below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the new controller; a logical link
			 * is only kept when staying on an AMP.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed - revert to the old controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5136
/* Handle an incoming Move Channel Confirm Response (initiator side).
 * Completes the move by committing the new controller id.  The channel
 * returned by l2cap_get_chan_by_scid() is locked and unlocked here.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Drop the logical link when the channel ends up on BR/EDR */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5171
5172 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5173 u16 to_multiplier)
5174 {
5175 u16 max_latency;
5176
5177 if (min > max || min < 6 || max > 3200)
5178 return -EINVAL;
5179
5180 if (to_multiplier < 10 || to_multiplier > 3200)
5181 return -EINVAL;
5182
5183 if (max >= to_multiplier * 8)
5184 return -EINVAL;
5185
5186 max_latency = (to_multiplier * 8 / max) - 1;
5187 if (latency > 499 || latency > max_latency)
5188 return -EINVAL;
5189
5190 return 0;
5191 }
5192
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the master of the link; the parameters are validated, a
 * response is always sent, and accepted parameters are pushed down to
 * the controller via hci_le_conn_update().
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the new parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5234
/* Handle an LE Credit Based Connection Response.  On success the
 * channel adopts the peer's dcid/mtu/mps/credits and becomes ready;
 * any other result deletes the channel.  Takes conn->chan_lock and
 * then the channel lock (standard nesting order in this file).
 *
 * Returns 0 on success, -EPROTO for a malformed packet, -EBADSLT when
 * no channel matches the command ident.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum MTU/MPS for LE credit based channels */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		/* Any non-success result refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5293
/* Dispatch a single BR/EDR signaling command to its handler.  Only a
 * subset of handlers propagate errors; a non-zero return makes the
 * caller send a command reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5373
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the requested PSM, verifies
 * security and duplicate CIDs, spawns a new channel and replies with
 * an LE Connection Response.  Two exit paths exist: response_unlock
 * (locks held, taken after the parent channel was found) and response
 * (no locks, no listener).  A deferred (BT_CONNECT2) setup sends no
 * response yet.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS for LE credit based channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Lock order: conn->chan_lock, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt addressing and the peer's connection parameters */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace decides; response is sent later */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	/* Deferred setup - no response yet */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5485
5486 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5487 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5488 u8 *data)
5489 {
5490 struct l2cap_le_credits *pkt;
5491 struct l2cap_chan *chan;
5492 u16 cid, credits, max_credits;
5493
5494 if (cmd_len != sizeof(*pkt))
5495 return -EPROTO;
5496
5497 pkt = (struct l2cap_le_credits *) data;
5498 cid = __le16_to_cpu(pkt->cid);
5499 credits = __le16_to_cpu(pkt->credits);
5500
5501 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5502
5503 chan = l2cap_get_chan_by_dcid(conn, cid);
5504 if (!chan)
5505 return -EBADSLT;
5506
5507 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5508 if (credits > max_credits) {
5509 BT_ERR("LE credits overflow");
5510 l2cap_send_disconn_req(chan, ECONNRESET);
5511
5512 /* Return 0 so that we don't trigger an unnecessary
5513 * command reject packet.
5514 */
5515 return 0;
5516 }
5517
5518 chan->tx_credits += credits;
5519
5520 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5521 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5522 chan->tx_credits--;
5523 }
5524
5525 if (chan->tx_credits)
5526 chan->ops->resume(chan);
5527
5528 l2cap_chan_unlock(chan);
5529
5530 return 0;
5531 }
5532
5533 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5534 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5535 u8 *data)
5536 {
5537 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5538 struct l2cap_chan *chan;
5539
5540 if (cmd_len < sizeof(*rej))
5541 return -EPROTO;
5542
5543 mutex_lock(&conn->chan_lock);
5544
5545 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5546 if (!chan)
5547 goto done;
5548
5549 l2cap_chan_lock(chan);
5550 l2cap_chan_del(chan, ECONNREFUSED);
5551 l2cap_chan_unlock(chan);
5552
5553 done:
5554 mutex_unlock(&conn->chan_lock);
5555 return 0;
5556 }
5557
/* Dispatch a single LE signaling command to its handler.  A non-zero
 * return makes the caller send a command reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do with the response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5604
/* Process an skb received on the LE signaling channel.  Unlike the
 * BR/EDR signaling channel, LE carries exactly one command per frame.
 * Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Declared length must match the payload, and ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): err comes from command handling, not a
		 * link-type check - the message text is misleading.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5645
/* Process an skb received on the BR/EDR signaling channel.  One frame
 * may carry several concatenated commands; each is dispatched in turn.
 * Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Walk the concatenated commands in the frame */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Declared length must fit in the remainder, and
		 * ident 0 is reserved.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): err comes from command handling,
			 * not a link-type check - message is misleading.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5694
/* Verify (and strip) the CRC16 FCS trailer of a received ERTM or
 * streaming frame.  Returns 0 if the FCS matches or is not in use,
 * -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off the logical length; the two bytes
		 * are still present in the buffer and are read below
		 * at skb->data + skb->len.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC covers the (already pulled) L2CAP header too,
		 * hence the negative offset by hdr_size.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5715
/* Send a frame carrying the F-bit in response to a poll: RNR when
 * locally busy, otherwise pending I-frames, otherwise a plain RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy condition - restart the
	 * retransmission timer if frames are still unacked.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5749
/* Append new_frag to skb's frag_list and update the head skb's length
 * accounting.  *last_frag tracks the current tail so appends stay O(1).
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Fold the fragment into the head skb's accounting */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5768
/* Reassemble an SDU from segmented I-frames according to the SAR bits.
 *
 * Ownership: when 0 is returned the skb has been consumed (delivered
 * upstream or absorbed into chan->sdu and set to NULL below).  On
 * error, both the skb and any partial SDU are freed here.  A default
 * err of -EINVAL covers every SAR sequence violation (e.g. START while
 * an SDU is pending, CONTINUE/END without one).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A pending SDU means the peer violated SAR sequencing */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* START fragment claiming to hold the whole SDU is invalid */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu - don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrun of the announced SDU length is invalid */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what START announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending skb (if still owned here)
		 * and any partially reassembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5850
/* Re-segment queued frames after a channel move changes the MPS.
 * Not implemented yet; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5856
5857 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5858 {
5859 u8 event;
5860
5861 if (chan->mode != L2CAP_MODE_ERTM)
5862 return;
5863
5864 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5865 l2cap_tx(chan, NULL, NULL, event);
5866 }
5867
/* Drain the SREJ queue: deliver frames in sequence order until a gap
 * (or local busy) stops us.  When the queue empties completely the
 * receiver returns to the normal RECV state and acks.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found - wait for the missing frame */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5901
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, honoring poll/final handshaking, and remember the reqseq
 * so a duplicate SREJ after a poll is not retransmitted twice
 * (CONN_SREJ_ACT).
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands a final bit in our reply */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final answers the
			 * SREJ we already acted on after a poll.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5959
/* Handle a received REJ (Reject) S-frame in ERTM mode.
 *
 * The peer rejects all I-frames from control->reqseq onward, so every
 * unacked frame from that point is retransmitted.  CONN_REJ_ACT guards
 * against retransmitting the same range twice when the matching F-bit
 * response arrives later.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq names a frame that was never
	 * sent - protocol violation, tear the channel down.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F-bit response: retransmit only if it was not already
		 * done in response to an earlier P-bit exchange.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5996
/* Classify the sequence number of a received I-frame relative to the
 * current receive state.
 *
 * Returns one of the L2CAP_TXSEQ_* values that drive the RX state
 * machines: EXPECTED, DUPLICATE, UNEXPECTED (a gap that triggers SREJ
 * recovery), the *_SREJ variants used while in SREJ_SENT state, or
 * INVALID / INVALID_IGNORE for frames outside the permitted tx window.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq (modulo the sequence space)
	 * has already been received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6082
/* ERTM receive state machine handler for the normal RECV state.
 *
 * Processes one incoming I-frame or S-frame event.  Takes ownership of
 * @skb when one is supplied: it is consumed by SDU reassembly or queued
 * on srej_q for gap recovery (skb_in_use), and freed otherwise.
 * Returns 0 on success or a negative error from reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, expected frames are dropped;
			 * the peer retransmits them once busy is cleared.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers an outstanding REJ/poll;
				 * otherwise fall back to retransmitting
				 * everything unacked.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the acknowledgement in
			 * the control field is of interest.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NULL check is defensive only; the other event cases
		 * dereference control unconditionally.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free the skb unless ownership was transferred above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6216
/* ERTM receive state machine handler for the SREJ_SENT state.
 *
 * While SREJ recovery is in progress, all in-window I-frames are queued
 * on srej_q; once the head of the SREJ list arrives, queued frames are
 * reassembled in order (l2cap_rx_queued_iframes).  Takes ownership of
 * @skb when one is supplied (skb_in_use tracks transfer to srej_q).
 * Returns 0 on success or a negative error from reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived: drain whatever
			 * contiguous run is now available.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the tail of the
			 * SREJ list with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free the skb unless ownership was transferred above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6359
6360 static int l2cap_finish_move(struct l2cap_chan *chan)
6361 {
6362 BT_DBG("chan %p", chan);
6363
6364 chan->rx_state = L2CAP_RX_STATE_RECV;
6365
6366 if (chan->hs_hcon)
6367 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6368 else
6369 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6370
6371 return l2cap_resegment(chan);
6372 }
6373
/* RX handler for the WAIT_P state: after initiating an AMP channel
 * move, wait for a frame with the P-bit set.  Once the poll arrives,
 * rewind the tx queue to the peer's reqseq, finish the move, reply
 * with the F-bit, and replay the event through the normal RECV
 * handler.  I-frames are rejected since data cannot be accepted yet.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6411
/* RX handler for the WAIT_F state: the responder side of an AMP
 * channel move waits for the frame carrying the F-bit.  When it
 * arrives, rewind the tx queue to the peer's reqseq, adopt the new
 * controller's MTU, resegment pending data, and process the frame
 * through the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6449
6450 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6451 {
6452 /* Make sure reqseq is for a packet that has been sent but not acked */
6453 u16 unacked;
6454
6455 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6456 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6457 }
6458
/* Top-level ERTM receive entry: validate the frame's reqseq and
 * dispatch to the handler for the channel's current rx_state.  A
 * reqseq acknowledging frames that were never sent is a protocol
 * violation and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6495
/* Receive handler for streaming mode.  In-sequence frames are passed
 * to SDU reassembly; any sequence gap discards both the partially
 * reassembled SDU and the new frame, then resynchronizes the expected
 * sequence numbers to the received frame.  Always returns 0 and always
 * consumes @skb.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partial SDU, streaming mode
		 * does no retransmission.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync to the frame just seen */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6533
/* Entry point for data on an ERTM or streaming mode channel.
 *
 * Unpacks the control field, validates the FCS and payload length,
 * then routes I-frames and S-frames into the rx state machines.
 * Corrupted frames are silently dropped so normal retransmission
 * recovery kicks in.  Always returns 0; @skb is always consumed.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU-length field of a start-of-SDU frame and
	 * for the trailing FCS to get the actual payload length.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame supervisory function to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6621
6622 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6623 {
6624 struct l2cap_conn *conn = chan->conn;
6625 struct l2cap_le_credits pkt;
6626 u16 return_credits;
6627
6628 /* We return more credits to the sender only after the amount of
6629 * credits falls below half of the initial amount.
6630 */
6631 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6632 return;
6633
6634 return_credits = le_max_credits - chan->rx_credits;
6635
6636 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6637
6638 chan->rx_credits += return_credits;
6639
6640 pkt.cid = cpu_to_le16(chan->scid);
6641 pkt.credits = cpu_to_le16(return_credits);
6642
6643 chan->ident = l2cap_get_ident(conn);
6644
6645 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6646 }
6647
/* Receive handler for LE credit-based flow control channels.
 *
 * Enforces the credit count, returns credits to the peer when running
 * low, and reassembles SDUs that span multiple PDUs (the first PDU of
 * an SDU carries a 16-bit SDU length prefix).
 *
 * Ownership: a negative early return (-ENOBUFS paths) leaves @skb for
 * the caller to free; once reassembly starts, all skbs are consumed
 * here and 0 is returned even on internal errors (see comment at the
 * bottom) to avoid a double free in the caller.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	/* One credit is consumed per received PDU */
	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete SDU in a single PDU: deliver it directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is absorbed into the SDU under reassembly */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6737
/* Route an skb received on a connection-oriented channel (or the A2MP
 * fixed channel) to the receive handler for the channel's mode.
 *
 * NOTE(review): the channel is unlocked at 'done', so
 * l2cap_get_chan_by_scid() is expected to return it locked - matching
 * the explicit l2cap_chan_lock() taken on the A2MP creation path.
 * The skb is consumed on all paths.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6804
/* Handle a connectionless (G-frame) packet on a BR/EDR link: deliver
 * it to a channel bound to the given PSM, recording the remote address
 * and PSM so the socket layer can report them via msg_name.  The skb
 * is freed unless the channel's recv callback takes it over.
 *
 * NOTE(review): if l2cap_global_chan_by_psm() takes a reference on the
 * returned channel, that reference is never released here - verify
 * against that helper's implementation.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6837
/* Deliver an ATT fixed-channel PDU (LE links only) to the connected
 * ATT channel.  Packets from blacklisted remote devices or larger than
 * the channel MTU are dropped.  The skb is freed unless the channel's
 * recv callback takes it over.
 *
 * NOTE(review): if l2cap_global_chan_by_scid() takes a reference on
 * the returned channel, that reference is never released here - verify
 * against that helper's implementation.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6866
/* Main entry for a complete L2CAP frame from the HCI layer.
 *
 * Frames arriving before the HCI connection is fully established are
 * parked on conn->pending_rx and replayed later by process_pending_rx.
 * Otherwise the basic header is validated and the frame is
 * demultiplexed by channel ID.  The skb is consumed on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* lh points at the basic header; it stays valid after the
	 * skb_pull() below since only skb->data is advanced.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	case L2CAP_FC_6LOWPAN:
		bt_6lowpan_recv(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6924
6925 static void process_pending_rx(struct work_struct *work)
6926 {
6927 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6928 pending_rx_work);
6929 struct sk_buff *skb;
6930
6931 BT_DBG("");
6932
6933 while ((skb = skb_dequeue(&conn->pending_rx)))
6934 l2cap_recv_frame(conn, skb);
6935 }
6936
/* Create (or return the already existing) L2CAP connection object for
 * an HCI connection.
 *
 * Allocates the conn and its HCI channel, takes a reference on the
 * hci_conn, sets the initial MTU from the controller (LE MTU when
 * available, ACL MTU otherwise), and initializes locks, the channel
 * list, the pending-rx machinery, and the link-type specific timer.
 * Returns NULL if either allocation fails.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by an earlier call */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links use the SMP security timer, BR/EDR links the
	 * information-request timer.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6999
7000 static bool is_valid_psm(u16 psm, u8 dst_type) {
7001 if (!psm)
7002 return false;
7003
7004 if (bdaddr_type_is_le(dst_type))
7005 return (psm <= 0x00ff);
7006
7007 /* PSM must be odd and lsb of upper byte must be 0 */
7008 return ((psm & 0x0101) == 0x0001);
7009 }
7010
/* Initiate an outgoing L2CAP channel connection to @dst.
 *
 * Validates the PSM/CID for the channel type and mode, establishes (or
 * reuses) the underlying ACL or LE link, attaches the channel to the
 * connection, and either starts the L2CAP connect procedure or, if the
 * link is already up, advances the channel state directly.
 *
 * Called with neither hdev nor chan locked; both are locked here (hdev
 * first, then chan) and released before returning.  Returns 0 on
 * success or if a connection attempt is already in progress, or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Validate PSM/CID against the channel type */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* Establish the baseband link - LE or ACL depending on the
	 * destination address type.
	 */
	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* l2cap_chan_add() takes conn->chan_lock, so drop the channel
	 * lock around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* Link already up: proceed without waiting for connect_cfm */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7158
7159 /* ---- L2CAP interface with lower layer (HCI) ---- */
7160
7161 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7162 {
7163 int exact = 0, lm1 = 0, lm2 = 0;
7164 struct l2cap_chan *c;
7165
7166 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7167
7168 /* Find listening sockets and check their link_mode */
7169 read_lock(&chan_list_lock);
7170 list_for_each_entry(c, &chan_list, global_l) {
7171 if (c->state != BT_LISTEN)
7172 continue;
7173
7174 if (!bacmp(&c->src, &hdev->bdaddr)) {
7175 lm1 |= HCI_LM_ACCEPT;
7176 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7177 lm1 |= HCI_LM_MASTER;
7178 exact++;
7179 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7180 lm2 |= HCI_LM_ACCEPT;
7181 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7182 lm2 |= HCI_LM_MASTER;
7183 }
7184 }
7185 read_unlock(&chan_list_lock);
7186
7187 return exact ? lm1 : lm2;
7188 }
7189
7190 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7191 {
7192 struct l2cap_conn *conn;
7193
7194 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7195
7196 if (!status) {
7197 conn = l2cap_conn_add(hcon);
7198 if (conn)
7199 l2cap_conn_ready(conn);
7200 } else {
7201 l2cap_conn_del(hcon, bt_to_errno(status));
7202 }
7203 }
7204
7205 int l2cap_disconn_ind(struct hci_conn *hcon)
7206 {
7207 struct l2cap_conn *conn = hcon->l2cap_data;
7208
7209 BT_DBG("hcon %p", hcon);
7210
7211 if (!conn)
7212 return HCI_ERROR_REMOTE_USER_TERM;
7213 return conn->disc_reason;
7214 }
7215
7216 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7217 {
7218 BT_DBG("hcon %p reason %d", hcon, reason);
7219
7220 bt_6lowpan_del_conn(hcon->l2cap_data);
7221
7222 l2cap_conn_del(hcon, bt_to_errno(reason));
7223 }
7224
7225 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7226 {
7227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7228 return;
7229
7230 if (encrypt == 0x00) {
7231 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7232 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7233 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7234 chan->sec_level == BT_SECURITY_FIPS)
7235 l2cap_chan_close(chan, ECONNREFUSED);
7236 } else {
7237 if (chan->sec_level == BT_SECURITY_MEDIUM)
7238 __clear_chan_timer(chan);
7239 }
7240 }
7241
/* HCI core callback: the security (authentication/encryption) state of
 * the link changed.
 *
 * @status:  HCI status of the security procedure (0 on success)
 * @encrypt: new encryption state of the link (0x00 = not encrypted)
 *
 * Walks every channel on the connection and advances or aborts its
 * connection state machine according to the outcome.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution; either way the SMP security timer is done.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channel is not affected by link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* ATT fixed channel becomes ready once the link encrypts */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Leave channels with an in-flight connect request alone */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			/* Established channel: resume traffic and re-check
			 * that the new encryption state is still acceptable.
			 */
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed before our connect request
			 * went out: send it now, or arm the disconnect
			 * timer if security failed.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was waiting on security:
			 * answer the peer's connect request accordingly.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace must still authorize */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately follow up with our
			 * configuration request (sent at most once).
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7346
/* HCI core callback: an inbound ACL data packet arrived on @hcon.
 *
 * Reassembles L2CAP PDUs that span several ACL fragments.  Partial data
 * accumulates in conn->rx_skb while conn->rx_len tracks the bytes still
 * missing.  Complete frames are handed to l2cap_recv_frame(), which
 * takes ownership of the skb.
 *
 * Always returns 0.  @skb is consumed on every path: passed on whole,
 * or freed at the drop label (its payload, if valid, has already been
 * copied into the reassembly buffer by then).
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous PDU was truncated: discard it and flag the
		 * connection as unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the length announced in the
		 * L2CAP header: abandon the partial PDU.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7451
7452 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7453 {
7454 struct l2cap_chan *c;
7455
7456 read_lock(&chan_list_lock);
7457
7458 list_for_each_entry(c, &chan_list, global_l) {
7459 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7460 &c->src, &c->dst,
7461 c->state, __le16_to_cpu(c->psm),
7462 c->scid, c->dcid, c->imtu, c->omtu,
7463 c->sec_level, c->mode);
7464 }
7465
7466 read_unlock(&chan_list_lock);
7467
7468 return 0;
7469 }
7470
/* debugfs open: bind the single-shot seq_file iterator to the show fn */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7475
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the "l2cap" debugfs file; removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7484
7485 int __init l2cap_init(void)
7486 {
7487 int err;
7488
7489 err = l2cap_init_sockets();
7490 if (err < 0)
7491 return err;
7492
7493 if (IS_ERR_OR_NULL(bt_debugfs))
7494 return 0;
7495
7496 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7497 NULL, &l2cap_debugfs_fops);
7498
7499 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7500 &le_max_credits);
7501 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7502 &le_default_mps);
7503
7504 bt_6lowpan_init();
7505
7506 return 0;
7507 }
7508
/* Tear down the L2CAP layer, in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	bt_6lowpan_cleanup();
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7515
/* Module option: disable enhanced retransmission mode globally
 * (0644: readable by all, writable by root via sysfs at runtime).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");