]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/l2cap_core.c
iwlwifi: mvm: fix a few wd_disable comments
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 #include "6lowpan.h"
44
45 bool disable_ertm;
46
47 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
48 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
65
66 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
67 {
68 if (hcon->type == LE_LINK) {
69 if (type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
73 }
74
75 return BDADDR_BREDR;
76 }
77
78 /* ---- L2CAP channels ---- */
79
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 u16 cid)
82 {
83 struct l2cap_chan *c;
84
85 list_for_each_entry(c, &conn->chan_l, list) {
86 if (c->dcid == cid)
87 return c;
88 }
89 return NULL;
90 }
91
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 u16 cid)
94 {
95 struct l2cap_chan *c;
96
97 list_for_each_entry(c, &conn->chan_l, list) {
98 if (c->scid == cid)
99 return c;
100 }
101 return NULL;
102 }
103
104 /* Find channel with given SCID.
105 * Returns locked channel. */
106 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
107 u16 cid)
108 {
109 struct l2cap_chan *c;
110
111 mutex_lock(&conn->chan_lock);
112 c = __l2cap_get_chan_by_scid(conn, cid);
113 if (c)
114 l2cap_chan_lock(c);
115 mutex_unlock(&conn->chan_lock);
116
117 return c;
118 }
119
120 /* Find channel with given DCID.
121 * Returns locked channel.
122 */
123 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
124 u16 cid)
125 {
126 struct l2cap_chan *c;
127
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_dcid(conn, cid);
130 if (c)
131 l2cap_chan_lock(c);
132 mutex_unlock(&conn->chan_lock);
133
134 return c;
135 }
136
137 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
138 u8 ident)
139 {
140 struct l2cap_chan *c;
141
142 list_for_each_entry(c, &conn->chan_l, list) {
143 if (c->ident == ident)
144 return c;
145 }
146 return NULL;
147 }
148
149 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
150 u8 ident)
151 {
152 struct l2cap_chan *c;
153
154 mutex_lock(&conn->chan_lock);
155 c = __l2cap_get_chan_by_ident(conn, ident);
156 if (c)
157 l2cap_chan_lock(c);
158 mutex_unlock(&conn->chan_lock);
159
160 return c;
161 }
162
163 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
164 {
165 struct l2cap_chan *c;
166
167 list_for_each_entry(c, &chan_list, global_l) {
168 if (c->sport == psm && !bacmp(&c->src, src))
169 return c;
170 }
171 return NULL;
172 }
173
/* Bind a PSM to @chan.
 *
 * A non-zero @psm is bound exactly as given; -EADDRINUSE is returned
 * if another channel already uses it on the same source address.  A
 * zero @psm requests dynamic allocation from the 0x1001-0x10ff range;
 * -EINVAL is returned if none is free.  Returns 0 on success.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* Dynamic PSMs have an odd least-significant octet, so
		 * step by 2 starting at 0x1001.
		 */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
206
207 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
208 {
209 write_lock(&chan_list_lock);
210
211 chan->scid = scid;
212
213 write_unlock(&chan_list_lock);
214
215 return 0;
216 }
217
218 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
219 {
220 u16 cid, dyn_end;
221
222 if (conn->hcon->type == LE_LINK)
223 dyn_end = L2CAP_CID_LE_DYN_END;
224 else
225 dyn_end = L2CAP_CID_DYN_END;
226
227 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
228 if (!__l2cap_get_chan_by_scid(conn, cid))
229 return cid;
230 }
231
232 return 0;
233 }
234
/* Transition @chan to @state and notify the owner via the state_change
 * callback with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
243
244 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
245 int state, int err)
246 {
247 chan->state = state;
248 chan->ops->state_change(chan, chan->state, err);
249 }
250
/* Report @err to the channel owner without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
255
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending or no retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
264
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (monitor supersedes retransmission).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
273
274 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
275 u16 seq)
276 {
277 struct sk_buff *skb;
278
279 skb_queue_walk(head, skb) {
280 if (bt_cb(skb)->control.txseq == seq)
281 return skb;
282 }
283
284 return NULL;
285 }
286
287 /* ---- L2CAP sequence number lists ---- */
288
289 /* For ERTM, ordered lists of sequence numbers must be tracked for
290 * SREJ requests that are received and for frames that are to be
291 * retransmitted. These seq_list functions implement a singly-linked
292 * list in an array, where membership in the list can also be checked
293 * in constant time. Items can also be added to the tail of the list
294 * and removed from the head in constant time, without further memory
295 * allocs or frees.
296 */
297
/* Allocate and reset a sequence list able to hold @size entries.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Mask used to fold a full sequence number into an array slot */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
320
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
325
/* Return true if @seq is currently a member of the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
332
/* Remove @seq from the list and return it.  Removing the head is
 * constant time; an interior element requires walking the singly-linked
 * chain to locate its predecessor.  Returns L2CAP_SEQ_LIST_CLEAR if the
 * list is empty or @seq is not present.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last element empties the list */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
366
/* Pop and return the head of the list (L2CAP_SEQ_LIST_CLEAR if empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
372
/* Empty the list, resetting every slot and the head/tail markers.
 * A list that is already empty is left untouched.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386
/* Append @seq at the tail of the list.  Duplicates are silently
 * ignored, so membership stays unique.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
/* Delayed work run when the channel timer expires.  Closes the channel
 * with an error derived from its current state, then drops the
 * reference that was taken when the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference held on behalf of the timer */
	l2cap_chan_put(chan);
}
434
/* Allocate and initialise a new L2CAP channel, register it in the
 * global channel list, and return it with one reference held.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC — presumably some caller runs in a
	 * context that cannot sleep; confirm before relaxing to GFP_KERNEL.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
462
/* kref release callback: unregister the channel from the global list
 * and free it.  Invoked when the last reference is dropped.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
475
/* Take a reference on @c.
 * NOTE(review): the debug print peeks at kref internals
 * (c->kref.refcount); prefer an accessor if one is available in this
 * kernel version.
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
482
/* Drop a reference on @c; the channel is destroyed via
 * l2cap_chan_destroy() when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
489
/* Reset the negotiable channel parameters (FCS, max transmit, tx
 * windows) to their defaults and start at the lowest security level.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
501
/* Initialise LE credit-based flow control state: no SDU reassembly in
 * progress, no tx credits until granted by the peer, and the local MPS
 * capped by the channel's input MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, L2CAP_LE_DEFAULT_MPS);

	skb_queue_head_init(&chan->tx_q);
}
513
/* Attach @chan to @conn: assign CIDs and default MTUs according to the
 * channel type, take channel and hci_conn references, and link the
 * channel into the connection's list.  Caller must hold
 * conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* The ATT fixed channel mirrors its CID on both
			 * sides; other LE channels get a dynamic CID.
			 */
			if (chan->dcid == L2CAP_CID_ATT) {
				chan->omtu = L2CAP_DEFAULT_MTU;
				chan->scid = L2CAP_CID_ATT;
			} else {
				chan->scid = l2cap_alloc_cid(conn);
			}
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
573
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
580
581 void l2cap_chan_del(struct l2cap_chan *chan, int err)
582 {
583 struct l2cap_conn *conn = chan->conn;
584
585 __clear_chan_timer(chan);
586
587 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
588
589 if (conn) {
590 struct amp_mgr *mgr = conn->hcon->amp_mgr;
591 /* Delete from channel list */
592 list_del(&chan->list);
593
594 l2cap_chan_put(chan);
595
596 chan->conn = NULL;
597
598 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
599 hci_conn_drop(conn->hcon);
600
601 if (mgr && mgr->bredr_chan == chan)
602 mgr->bredr_chan = NULL;
603 }
604
605 if (chan->hs_hchan) {
606 struct hci_chan *hs_hchan = chan->hs_hchan;
607
608 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
609 amp_disconnect_logical_link(hs_hchan);
610 }
611
612 chan->ops->teardown(chan, err);
613
614 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
615 return;
616
617 switch(chan->mode) {
618 case L2CAP_MODE_BASIC:
619 break;
620
621 case L2CAP_MODE_LE_FLOWCTL:
622 skb_queue_purge(&chan->tx_q);
623 break;
624
625 case L2CAP_MODE_ERTM:
626 __clear_retrans_timer(chan);
627 __clear_monitor_timer(chan);
628 __clear_ack_timer(chan);
629
630 skb_queue_purge(&chan->srej_q);
631
632 l2cap_seq_list_free(&chan->srej_list);
633 l2cap_seq_list_free(&chan->retrans_list);
634
635 /* fall through */
636
637 case L2CAP_MODE_STREAMING:
638 skb_queue_purge(&chan->tx_q);
639 break;
640 }
641
642 return;
643 }
644
/* Reject a pending incoming LE connection request on @chan, replying
 * with "authorization pending" for deferred setup and "bad PSM"
 * otherwise, and move the channel to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
667
/* Reject a pending incoming BR/EDR connection request on @chan,
 * replying with "security block" for deferred setup and "bad PSM"
 * otherwise, and move the channel to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
688
/* Close @chan for @reason, taking the action appropriate to its state:
 * tear down a listener, send a disconnect request for established
 * connection-oriented channels, reject a still-pending incoming
 * connection, or delete the channel outright.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
		 * check for chan->psm.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject it with
		 * the mechanism matching the link type.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
733
/* Derive the HCI authentication requirement for @chan from its channel
 * type, security level, and PSM.  SDP/3DSP channels at BT_SECURITY_LOW
 * are demoted to BT_SECURITY_SDP as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
780
781 /* Service level security */
782 int l2cap_chan_check_security(struct l2cap_chan *chan)
783 {
784 struct l2cap_conn *conn = chan->conn;
785 __u8 auth_type;
786
787 if (conn->hcon->type == LE_LINK)
788 return smp_conn_security(conn->hcon, chan->sec_level);
789
790 auth_type = l2cap_get_auth_type(chan);
791
792 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
793 }
794
/* Return the next free signalling command identifier for @conn,
 * cycling through 1..128.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	/* Wrap from 128 back to 1 (0 is never used) */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
816
/* Build and transmit a signalling command on @conn at maximum HCI
 * priority.  Allocation failure is silently ignored.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Signalling must not be flushed if the controller supports it */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
838
839 static bool __chan_is_moving(struct l2cap_chan *chan)
840 {
841 return chan->move_state != L2CAP_MOVE_STABLE &&
842 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
843 }
844
/* Transmit @skb on the channel's data path.  Channels moved to an AMP
 * controller use the high-speed hci_chan (the skb is dropped if it is
 * gone); otherwise the frame goes out on the ACL link with flush flags
 * derived from the channel and controller capabilities.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
871
/* Decode a 16-bit enhanced-mode control field into @control, zeroing
 * the fields that do not apply to the decoded frame type.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
895
/* Decode a 32-bit extended-window control field into @control, zeroing
 * the fields that do not apply to the decoded frame type.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
919
920 static inline void __unpack_control(struct l2cap_chan *chan,
921 struct sk_buff *skb)
922 {
923 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
924 __unpack_extended_control(get_unaligned_le32(skb->data),
925 &bt_cb(skb)->control);
926 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
927 } else {
928 __unpack_enhanced_control(get_unaligned_le16(skb->data),
929 &bt_cb(skb)->control);
930 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
931 }
932 }
933
/* Encode @control into a 32-bit extended-window control field. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
952
/* Encode @control into a 16-bit enhanced-mode control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
971
972 static inline void __pack_control(struct l2cap_chan *chan,
973 struct l2cap_ctrl *control,
974 struct sk_buff *skb)
975 {
976 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
977 put_unaligned_le32(__pack_extended_control(control),
978 skb->data + L2CAP_HDR_SIZE);
979 } else {
980 put_unaligned_le16(__pack_enhanced_control(control),
981 skb->data + L2CAP_HDR_SIZE);
982 }
983 }
984
985 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
986 {
987 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
988 return L2CAP_EXT_HDR_SIZE;
989 else
990 return L2CAP_ENH_HDR_SIZE;
991 }
992
/* Allocate and build a supervisory (S-frame) PDU carrying the packed
 * @control field, appending an FCS when CRC16 is in use.  Returns the
 * skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field built above */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1025
/* Build and send a supervisory frame described by @control, updating
 * the channel's ack state: a pending F-bit is folded into non-poll
 * frames, RR/RNR toggle the RNR-sent flag, and any frame other than
 * SREJ acknowledges up to reqseq and cancels the ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* No traffic while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1066
1067 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1068 {
1069 struct l2cap_ctrl control;
1070
1071 BT_DBG("chan %p, poll %d", chan, poll);
1072
1073 memset(&control, 0, sizeof(control));
1074 control.sframe = 1;
1075 control.poll = poll;
1076
1077 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1078 control.super = L2CAP_SUPER_RNR;
1079 else
1080 control.super = L2CAP_SUPER_RR;
1081
1082 control.reqseq = chan->buffer_seq;
1083 l2cap_send_sframe(chan, &control);
1084 }
1085
/* True if no connect request is outstanding on @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1090
/* Return true if @chan may be created on (or moved to) an AMP
 * controller: high speed enabled, the peer advertises the A2MP fixed
 * channel, at least one non-BR/EDR AMP controller is up, and the
 * channel policy prefers AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1118
/* Validate the channel's extended flow specification.
 * NOTE(review): currently a stub that accepts unconditionally — the
 * actual parameter checks are not implemented here.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1124
/* Send an L2CAP connection request for @chan, recording the signalling
 * identifier and marking the connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1139
/* Send a Create Channel request for @chan targeting the AMP controller
 * identified by @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1152
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset retransmission bookkeeping and SREJ state, and park the
 * transmit/receive state machines until the move completes.  A no-op
 * for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already sent (retries != 0) are reset to one attempt;
	 * the first unsent frame ends the walk.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move finishes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1187
1188 static void l2cap_move_done(struct l2cap_chan *chan)
1189 {
1190 u8 move_role = chan->move_role;
1191 BT_DBG("chan %p", chan);
1192
1193 chan->move_state = L2CAP_MOVE_STABLE;
1194 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1195
1196 if (chan->mode != L2CAP_MODE_ERTM)
1197 return;
1198
1199 switch (move_role) {
1200 case L2CAP_MOVE_ROLE_INITIATOR:
1201 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1202 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1203 break;
1204 case L2CAP_MOVE_ROLE_RESPONDER:
1205 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1206 break;
1207 }
1208 }
1209
/* Move @chan to BT_CONNECTED and notify its owner via ops->ready(). */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel without tx credits cannot send yet. */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1223
1224 static void l2cap_le_connect(struct l2cap_chan *chan)
1225 {
1226 struct l2cap_conn *conn = chan->conn;
1227 struct l2cap_le_conn_req req;
1228
1229 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1230 return;
1231
1232 req.psm = chan->psm;
1233 req.scid = cpu_to_le16(chan->scid);
1234 req.mtu = cpu_to_le16(chan->imtu);
1235 req.mps = cpu_to_le16(chan->mps);
1236 req.credits = cpu_to_le16(chan->rx_credits);
1237
1238 chan->ident = l2cap_get_ident(conn);
1239
1240 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1241 sizeof(req), &req);
1242 }
1243
1244 static void l2cap_le_start(struct l2cap_chan *chan)
1245 {
1246 struct l2cap_conn *conn = chan->conn;
1247
1248 if (!smp_conn_security(conn->hcon, chan->sec_level))
1249 return;
1250
1251 if (!chan->psm) {
1252 l2cap_chan_ready(chan);
1253 return;
1254 }
1255
1256 if (chan->state == BT_CONNECT)
1257 l2cap_le_connect(chan);
1258 }
1259
1260 static void l2cap_start_connection(struct l2cap_chan *chan)
1261 {
1262 if (__amp_capable(chan)) {
1263 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1264 a2mp_discover_amp(chan);
1265 } else if (chan->conn->hcon->type == LE_LINK) {
1266 l2cap_le_start(chan);
1267 } else {
1268 l2cap_send_conn_req(chan);
1269 }
1270 }
1271
/* Kick off connection establishment for @chan.
 *
 * On BR/EDR the remote feature mask must be known first: if the info
 * exchange has not started, send an Information Request; the channel is
 * retried from l2cap_conn_start() once the response (or the timeout)
 * arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight; wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Fall back to defaults if the peer never answers. */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1302
1303 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1304 {
1305 u32 local_feat_mask = l2cap_feat_mask;
1306 if (!disable_ertm)
1307 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1308
1309 switch (mode) {
1310 case L2CAP_MODE_ERTM:
1311 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1312 case L2CAP_MODE_STREAMING:
1313 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1314 default:
1315 return 0x00;
1316 }
1317 }
1318
/* Send a Disconnection Request for @chan and move it to BT_DISCONN.
 * @err is propagated to the channel owner with the state change.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* A connected ERTM channel has timers that must not fire again. */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* The A2MP fixed channel has no disconnect PDU; state only. */
	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1345
1346 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and advance its
 * setup: channels in BT_CONNECT get a Connection Request (or are closed
 * if their mode is unsupported), channels in BT_CONNECT2 get a
 * (possibly deferred) Connection Response followed by the first
 * Configure Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close when the required mode is not supported by
			 * the remote and no fallback is allowed.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept or reject
					 * before answering the peer.
					 */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security procedure still pending. */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response that has not already
			 * triggered configuration starts it here.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1422
1423 /* Find socket with cid and source/destination bdaddr.
1424 * Returns closest match, locked.
1425 */
1426 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1427 bdaddr_t *src,
1428 bdaddr_t *dst)
1429 {
1430 struct l2cap_chan *c, *c1 = NULL;
1431
1432 read_lock(&chan_list_lock);
1433
1434 list_for_each_entry(c, &chan_list, global_l) {
1435 if (state && c->state != state)
1436 continue;
1437
1438 if (c->scid == cid) {
1439 int src_match, dst_match;
1440 int src_any, dst_any;
1441
1442 /* Exact match. */
1443 src_match = !bacmp(&c->src, src);
1444 dst_match = !bacmp(&c->dst, dst);
1445 if (src_match && dst_match) {
1446 read_unlock(&chan_list_lock);
1447 return c;
1448 }
1449
1450 /* Closest match */
1451 src_any = !bacmp(&c->src, BDADDR_ANY);
1452 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1453 if ((src_match && dst_any) || (src_any && dst_match) ||
1454 (src_any && dst_any))
1455 c1 = c;
1456 }
1457 }
1458
1459 read_unlock(&chan_list_lock);
1460
1461 return c1;
1462 }
1463
/* An LE link has come up: if a channel is listening on the ATT fixed
 * CID, spawn a server channel for the remote device (unless a client
 * channel already exists or the peer is blacklisted).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	bt_6lowpan_add_conn(conn);

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* Copy the link's addressing onto the new child channel. */
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1508
/* The underlying link is up: start or complete setup for every channel
 * already attached to @conn.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels complete via their own signalling. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels need no connect/config exchange. */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1550
/* Notify sockets that we cannot guarantee reliability anymore */
1552 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1553 {
1554 struct l2cap_chan *chan;
1555
1556 BT_DBG("conn %p", conn);
1557
1558 mutex_lock(&conn->chan_lock);
1559
1560 list_for_each_entry(chan, &conn->chan_l, list) {
1561 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1562 l2cap_chan_set_err(chan, err);
1563 }
1564
1565 mutex_unlock(&conn->chan_lock);
1566 }
1567
/* The Information Request timed out: mark the feature exchange as done
 * so pending channels can proceed with default capabilities.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1578
1579 /*
1580 * l2cap_user
1581 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1582 * callback is called during registration. The ->remove callback is called
1583 * during unregistration.
 * An l2cap_user object is either unregistered explicitly, or removed
 * implicitly when the underlying l2cap_conn object is deleted. This
 * guarantees that l2cap->hcon,
1586 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1587 * External modules must own a reference to the l2cap_conn object if they intend
1588 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1589 * any time if they don't.
1590 */
1591
/* Register @user on @conn and invoke its ->probe callback.
 *
 * Returns 0 on success, -EINVAL if @user is already registered, -ENODEV
 * if the connection has already been torn down, or the error returned
 * by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list links mean the user is already registered. */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1629
1630 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1631 {
1632 struct hci_dev *hdev = conn->hcon->hdev;
1633
1634 hci_dev_lock(hdev);
1635
1636 if (!user->list.next || !user->list.prev)
1637 goto out_unlock;
1638
1639 list_del(&user->list);
1640 user->list.next = NULL;
1641 user->list.prev = NULL;
1642 user->remove(conn, user);
1643
1644 out_unlock:
1645 hci_dev_unlock(hdev);
1646 }
1647 EXPORT_SYMBOL(l2cap_unregister_user);
1648
1649 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1650 {
1651 struct l2cap_user *user;
1652
1653 while (!list_empty(&conn->users)) {
1654 user = list_first_entry(&conn->users, struct l2cap_user, list);
1655 list_del(&user->list);
1656 user->list.next = NULL;
1657 user->list.prev = NULL;
1658 user->remove(conn, user);
1659 }
1660 }
1661
/* Tear down all L2CAP state attached to @hcon.
 *
 * Kills every channel with error @err, drops the HCI channel, cancels
 * pending timers/work and releases the reference the hci_conn held on
 * the l2cap_conn.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close() may still run after
		 * l2cap_chan_del() unlinks it from the connection.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	/* NULL hchan marks the conn as dead for l2cap_register_user(). */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1707
/* The SMP security procedure timed out: destroy the SMP context and
 * drop the whole LE connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1720
/* Find or create the l2cap_conn for @hcon.
 *
 * Returns the existing connection when one is already attached;
 * otherwise allocates a new one together with its HCI channel.
 * Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Reference released in l2cap_conn_free(). */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the MTU of the underlying transport. */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links time out a pending SMP procedure; BR/EDR links time
	 * out the information exchange.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1780
/* kref release callback: the last reference to @conn is gone. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1788
/* Take an additional reference on @conn. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1794
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1800
1801 /* ---- Socket interface ---- */
1802
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact address pair wins immediately,
 * otherwise the last wildcard (BDADDR_ANY) match is returned.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's address type must match the link type. */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1850
1851 static bool is_valid_psm(u16 psm, u8 dst_type)
1852 {
1853 if (!psm)
1854 return false;
1855
1856 if (bdaddr_type_is_le(dst_type))
1857 return (psm <= 0x00ff);
1858
1859 /* PSM must be odd and lsb of upper byte must be 0 */
1860 return ((psm & 0x0101) == 0x0001);
1861 }
1862
/* Connect @chan to PSM @psm (or fixed CID @cid) at @dst/@dst_type.
 *
 * Validates the PSM/CID and channel mode, brings up (or reuses) the
 * underlying ACL or LE link, attaches the channel to the connection and
 * starts channel establishment. Returns 0 on success (including the
 * "already connecting" case) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Raw channels may use any PSM; others need a valid PSM or CID. */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Reject a fixed CID that is already in use on this link. */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* l2cap_chan_add() takes conn->chan_lock, so release the channel
	 * lock around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* The link may already be up if it was shared. */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1998
1999 static void l2cap_monitor_timeout(struct work_struct *work)
2000 {
2001 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2002 monitor_timer.work);
2003
2004 BT_DBG("chan %p", chan);
2005
2006 l2cap_chan_lock(chan);
2007
2008 if (!chan->conn) {
2009 l2cap_chan_unlock(chan);
2010 l2cap_chan_put(chan);
2011 return;
2012 }
2013
2014 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2015
2016 l2cap_chan_unlock(chan);
2017 l2cap_chan_put(chan);
2018 }
2019
2020 static void l2cap_retrans_timeout(struct work_struct *work)
2021 {
2022 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2023 retrans_timer.work);
2024
2025 BT_DBG("chan %p", chan);
2026
2027 l2cap_chan_lock(chan);
2028
2029 if (!chan->conn) {
2030 l2cap_chan_unlock(chan);
2031 l2cap_chan_put(chan);
2032 return;
2033 }
2034
2035 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2036 l2cap_chan_unlock(chan);
2037 l2cap_chan_put(chan);
2038 }
2039
/* Transmit the queued I-frames in @skbs in streaming mode.
 *
 * Frames are sent immediately with sequential tx sequence numbers and
 * are never retransmitted (no ack tracking in streaming mode).
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Queue nothing while an AMP move is in progress. */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode carries no acknowledgements. */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2078
/* Transmit pending I-frames in ERTM mode, honouring the remote tx
 * window. The original skbs remain on tx_q for retransmission; clones
 * are handed to the lower layer.
 *
 * Returns the number of frames sent, 0 if blocked (remote busy, moving,
 * window full), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	/* Send while there is data and room in the remote's window. */
	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges received frames. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue drained. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2148
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list.
 *
 * Each frame's control field is rebuilt with the current reqseq and
 * F-bit before sending. Exceeding chan->max_tx retries disconnects the
 * channel.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means retry forever. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS over the rewritten control field. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2227
/* Retransmit the single frame identified by control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2236
/* Retransmit every unacked frame starting at control->reqseq.
 * Used on REJ or on a poll response requesting full retransmission.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll must be answered with the F-bit set. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame at reqseq (or to the
		 * unsent portion of the queue, whichever comes first).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything already sent from there onwards. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2270
/* Acknowledge received I-frames.
 *
 * Sends RNR when locally busy; otherwise tries to piggyback the ack on
 * outgoing I-frames, sends an explicit RR once the window is 3/4 full,
 * and arms the ack timer for anything still outstanding.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Defer the remaining acks to the ack timer. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2320
/* Copy @len bytes of user data from @msg into @skb, fragmenting across
 * the skb's frag_list at the connection MTU.
 *
 * @count is the number of bytes that fit in the first skb (after its
 * L2CAP header). Returns the number of bytes copied or a negative
 * errno; on error the caller frees @skb (which also frees any frags).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2365
/* Build a connectionless (G-frame) PDU from user data in @msg.
 *
 * Layout: basic L2CAP header + 2-byte PSM + payload (fragmented via
 * frag_list as needed). Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* The first fragment carries the header within the link MTU. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2400
2401 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2402 struct msghdr *msg, size_t len,
2403 u32 priority)
2404 {
2405 struct l2cap_conn *conn = chan->conn;
2406 struct sk_buff *skb;
2407 int err, count;
2408 struct l2cap_hdr *lh;
2409
2410 BT_DBG("chan %p len %zu", chan, len);
2411
2412 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2413
2414 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2415 msg->msg_flags & MSG_DONTWAIT);
2416 if (IS_ERR(skb))
2417 return skb;
2418
2419 skb->priority = priority;
2420
2421 /* Create L2CAP header */
2422 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2423 lh->cid = cpu_to_le16(chan->dcid);
2424 lh->len = cpu_to_le16(len);
2425
2426 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2427 if (unlikely(err < 0)) {
2428 kfree_skb(skb);
2429 return ERR_PTR(err);
2430 }
2431 return skb;
2432 }
2433
/* Build an ERTM/streaming I-frame PDU.  The header consists of the basic
 * L2CAP header, an (initially zeroed) enhanced or extended control field,
 * and — for the first segment of a segmented SDU — the 2-byte SDU length.
 * Room for the FCS trailer is reserved in the size calculation when the
 * channel uses CRC16; the control field itself is filled in later, just
 * before transmission.
 *
 * @sdulen: total SDU length for a SAR start fragment, 0 otherwise.
 *
 * Returns the skb, or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size depends on enhanced vs extended mode. */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* On-wire length covers control field, optional SDULEN and FCS. */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2487
/* Segment an outgoing SDU into a queue of ERTM/streaming I-frame PDUs,
 * tagging each with the appropriate SAR value (unsegmented, start,
 * continue, end).  On any allocation/copy failure the partially built
 * queue is purged and the error is returned; on success 0 is returned
 * with all PDUs on @seg_queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDULEN field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length, which
		 * costs L2CAP_SDULEN_SIZE bytes of payload space.
		 */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first segment carries SDULEN; later
			 * segments regain that payload space.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2557
/* Build a single LE credit-based flow control PDU: basic L2CAP header,
 * optional 2-byte SDU length (first PDU of an SDU only), then payload.
 * Overflow past the first HCI fragment goes to the skb frag_list via
 * l2cap_skbuff_fromiovec().
 *
 * @sdulen: total SDU length for the first PDU of an SDU, 0 otherwise.
 *
 * Returns the skb, or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2600
/* Segment an SDU for an LE credit-based channel.  Only the first PDU
 * carries the 2-byte SDU length; subsequent PDUs use the full payload
 * space.  On failure the partially built queue is purged and the error
 * returned; on success 0 is returned with all PDUs on @seg_queue.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* Per-PDU payload bound: HCI MTU minus the basic header,
	 * further clamped to what the remote advertised (MPS).
	 */
	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	/* First PDU always carries SDULEN, which costs payload space. */
	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later PDUs omit SDULEN and regain its space. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2640
/* Send user data on an L2CAP channel, dispatching on channel type and
 * mode: connectionless, LE credit-based flow control, basic, or
 * ERTM/streaming.  Returns the number of bytes accepted (len) on
 * success or a negative errno.
 *
 * NOTE(review): the caller is expected to hold the channel lock while
 * this runs — confirm against l2cap_sock_sendmsg and other callers.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* No credits: the peer cannot accept more PDUs yet. */
		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* The channel may have gone down while segmenting
		 * (allocation can block); drop anything we built.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Each transmitted PDU consumes one credit. */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: tell the socket layer to stop feeding
		 * us data until the peer grants more.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2757
/* Send SREJ (selective reject) S-frames for every sequence number from
 * the expected TX sequence up to (but not including) @txseq that is not
 * already buffered in the SREJ queue, recording each in the SREJ list.
 * Afterwards the expected TX sequence is advanced past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already received out of order. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2780
2781 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2782 {
2783 struct l2cap_ctrl control;
2784
2785 BT_DBG("chan %p", chan);
2786
2787 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2788 return;
2789
2790 memset(&control, 0, sizeof(control));
2791 control.sframe = 1;
2792 control.super = L2CAP_SUPER_SREJ;
2793 control.reqseq = chan->srej_list.tail;
2794 l2cap_send_sframe(chan, &control);
2795 }
2796
/* Re-send SREJ S-frames for every entry currently on the SREJ list,
 * stopping early if @txseq is reached.  Each popped entry is appended
 * back, rotating the list; the saved initial head guarantees exactly
 * one pass even though entries are re-appended during iteration.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2822
/* Process an acknowledgement (ReqSeq) from the peer: free every frame
 * on the TX queue from the previously expected ack up to @reqseq, and
 * stop the retransmission timer once nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or a duplicate ack: nothing to do. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Frame may already have been released (e.g. after a
		 * previous selective ack), so it can be absent.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2854
/* Abandon the SREJ_SENT receive state: discard all out-of-order frames
 * and pending SREJ bookkeeping, rewind the expected TX sequence to the
 * buffer sequence, and return the receiver to the normal RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2864
/* ERTM transmitter state machine, XMIT state: normal operation where
 * new data is queued and sent immediately, local-busy transitions are
 * handled, and poll events move the channel into WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Poll the peer with RR to clear the earlier RNR
			 * and wait for its final response in WAIT_F.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await its
		 * final response before retransmitting.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2936
2937 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2938 struct l2cap_ctrl *control,
2939 struct sk_buff_head *skbs, u8 event)
2940 {
2941 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2942 event);
2943
2944 switch (event) {
2945 case L2CAP_EV_DATA_REQUEST:
2946 if (chan->tx_send_head == NULL)
2947 chan->tx_send_head = skb_peek(skbs);
2948 /* Queue data, but don't send. */
2949 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2950 break;
2951 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2952 BT_DBG("Enter LOCAL_BUSY");
2953 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2954
2955 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2956 /* The SREJ_SENT state must be aborted if we are to
2957 * enter the LOCAL_BUSY state.
2958 */
2959 l2cap_abort_rx_srej_sent(chan);
2960 }
2961
2962 l2cap_send_ack(chan);
2963
2964 break;
2965 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2966 BT_DBG("Exit LOCAL_BUSY");
2967 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2968
2969 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2970 struct l2cap_ctrl local_control;
2971 memset(&local_control, 0, sizeof(local_control));
2972 local_control.sframe = 1;
2973 local_control.super = L2CAP_SUPER_RR;
2974 local_control.poll = 1;
2975 local_control.reqseq = chan->buffer_seq;
2976 l2cap_send_sframe(chan, &local_control);
2977
2978 chan->retry_count = 1;
2979 __set_monitor_timer(chan);
2980 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2981 }
2982 break;
2983 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2984 l2cap_process_reqseq(chan, control->reqseq);
2985
2986 /* Fall through */
2987
2988 case L2CAP_EV_RECV_FBIT:
2989 if (control && control->final) {
2990 __clear_monitor_timer(chan);
2991 if (chan->unacked_frames > 0)
2992 __set_retrans_timer(chan);
2993 chan->retry_count = 0;
2994 chan->tx_state = L2CAP_TX_STATE_XMIT;
2995 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2996 }
2997 break;
2998 case L2CAP_EV_EXPLICIT_POLL:
2999 /* Ignore */
3000 break;
3001 case L2CAP_EV_MONITOR_TO:
3002 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3003 l2cap_send_rr_or_rnr(chan, 1);
3004 __set_monitor_timer(chan);
3005 chan->retry_count++;
3006 } else {
3007 l2cap_send_disconn_req(chan, ECONNABORTED);
3008 }
3009 break;
3010 default:
3011 break;
3012 }
3013 }
3014
3015 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3016 struct sk_buff_head *skbs, u8 event)
3017 {
3018 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3019 chan, control, skbs, event, chan->tx_state);
3020
3021 switch (chan->tx_state) {
3022 case L2CAP_TX_STATE_XMIT:
3023 l2cap_tx_state_xmit(chan, control, skbs, event);
3024 break;
3025 case L2CAP_TX_STATE_WAIT_F:
3026 l2cap_tx_state_wait_f(chan, control, skbs, event);
3027 break;
3028 default:
3029 /* Ignore event */
3030 break;
3031 }
3032 }
3033
/* Feed a received frame's ReqSeq and F bit into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3040
/* Feed only a received frame's F bit into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3047
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		/* Clone failure is non-fatal: skip this channel. */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() takes ownership on success; free on rejection. */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3075
3076 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (CID chosen by link
 * type), command header, then @dlen bytes of @data.  Data beyond the
 * first HCI fragment is chained as headerless continuation skbs on
 * frag_list.  Returns NULL on allocation failure or undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* The headers alone must fit in one fragment. */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling channel CID differs between LE and BR/EDR links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including already-built fragments. */
	kfree_skb(skb);
	return NULL;
}
3142
/* Parse one configuration option at *ptr, returning its type, length
 * and value, advancing *ptr past it, and returning the total bytes
 * consumed.  For lengths other than 1/2/4 the value is a pointer to
 * the option payload rather than the payload itself.
 *
 * NOTE(review): opt->len is taken from the peer and is not validated
 * here against the remaining buffer length — callers must bound the
 * walk (they loop on len >= L2CAP_CONF_OPT_SIZE, but a bogus opt->len
 * can still push *ptr past the end before the caller's check fires).
 * Later upstream kernels hardened this path; confirm exposure.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3176
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  For lengths other than 1/2/4, @val is interpreted as
 * a pointer to @len bytes of payload.
 *
 * NOTE(review): this writes into the caller's response buffer without
 * any notion of the buffer's remaining size; upstream later added an
 * explicit size argument to bound these writes (CVE-2017-1000251-era
 * hardening).  Changing the signature here would break callers, so
 * this is flagged rather than fixed.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to payload. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3206
/* Append an Extended Flow Specification option filled in from the
 * channel's local QoS parameters.  ERTM channels use their negotiated
 * identifiers and default latency/flush values; streaming channels use
 * best-effort defaults.  Other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		/* EFS only applies to ERTM and streaming modes. */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3237
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not yet been acknowledged, send an RR (or RNR if locally busy).
 * The final l2cap_chan_put() presumably balances a reference taken when
 * the timer was armed — confirm against __set_ack_timer.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last ack we sent. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3257
/* Reset a channel's sequence counters, SDU reassembly state and AMP
 * move state, then — for ERTM mode only — initialize the timers, SREJ
 * queue and the two sequence lists.  Returns 0 on success or a negative
 * errno if sequence-list allocation fails (the first list is freed if
 * the second allocation fails).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming and basic modes need none of the ERTM machinery. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3302
3303 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3304 {
3305 switch (mode) {
3306 case L2CAP_MODE_STREAMING:
3307 case L2CAP_MODE_ERTM:
3308 if (l2cap_mode_supported(mode, remote_feat_mask))
3309 return mode;
3310 /* fall through */
3311 default:
3312 return L2CAP_MODE_BASIC;
3313 }
3314 }
3315
3316 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3317 {
3318 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3319 }
3320
3321 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3322 {
3323 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3324 }
3325
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout; on BR/EDR the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields. */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3363
3364 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3365 {
3366 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3367 __l2cap_ews_supported(chan->conn)) {
3368 /* use extended control field */
3369 set_bit(FLAG_EXT_CTRL, &chan->flags);
3370 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3371 } else {
3372 chan->tx_win = min_t(u16, chan->tx_win,
3373 L2CAP_DEFAULT_TX_WINDOW);
3374 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3375 }
3376 chan->ack_win = chan->tx_win;
3377 }
3378
/* Build an outgoing Configure Request into @data: select the channel
 * mode on the first request, then emit MTU, RFC, EFS, EWS and FCS
 * options as applicable for the chosen mode.  Returns the number of
 * bytes written.
 *
 * NOTE(review): options are appended without a bound on @data's size;
 * later upstream kernels added an explicit data_size parameter here.
 * Callers must provide a buffer large enough for the worst case.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their configured mode as-is. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Omit the RFC option entirely when the remote supports
		 * neither ERTM nor streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size bounded by HCI MTU minus worst-case L2CAP
		 * overhead (extended header, SDULEN, FCS).
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Full window goes into EWS when extended control is on. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3496
3497 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3498 {
3499 struct l2cap_conf_rsp *rsp = data;
3500 void *ptr = rsp->data;
3501 void *req = chan->conf_req;
3502 int len = chan->conf_len;
3503 int type, hint, olen;
3504 unsigned long val;
3505 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3506 struct l2cap_conf_efs efs;
3507 u8 remote_efs = 0;
3508 u16 mtu = L2CAP_DEFAULT_MTU;
3509 u16 result = L2CAP_CONF_SUCCESS;
3510 u16 size;
3511
3512 BT_DBG("chan %p", chan);
3513
3514 while (len >= L2CAP_CONF_OPT_SIZE) {
3515 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3516
3517 hint = type & L2CAP_CONF_HINT;
3518 type &= L2CAP_CONF_MASK;
3519
3520 switch (type) {
3521 case L2CAP_CONF_MTU:
3522 mtu = val;
3523 break;
3524
3525 case L2CAP_CONF_FLUSH_TO:
3526 chan->flush_to = val;
3527 break;
3528
3529 case L2CAP_CONF_QOS:
3530 break;
3531
3532 case L2CAP_CONF_RFC:
3533 if (olen == sizeof(rfc))
3534 memcpy(&rfc, (void *) val, olen);
3535 break;
3536
3537 case L2CAP_CONF_FCS:
3538 if (val == L2CAP_FCS_NONE)
3539 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3540 break;
3541
3542 case L2CAP_CONF_EFS:
3543 remote_efs = 1;
3544 if (olen == sizeof(efs))
3545 memcpy(&efs, (void *) val, olen);
3546 break;
3547
3548 case L2CAP_CONF_EWS:
3549 if (!chan->conn->hs_enabled)
3550 return -ECONNREFUSED;
3551
3552 set_bit(FLAG_EXT_CTRL, &chan->flags);
3553 set_bit(CONF_EWS_RECV, &chan->conf_state);
3554 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3555 chan->remote_tx_win = val;
3556 break;
3557
3558 default:
3559 if (hint)
3560 break;
3561
3562 result = L2CAP_CONF_UNKNOWN;
3563 *((u8 *) ptr++) = type;
3564 break;
3565 }
3566 }
3567
3568 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3569 goto done;
3570
3571 switch (chan->mode) {
3572 case L2CAP_MODE_STREAMING:
3573 case L2CAP_MODE_ERTM:
3574 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3575 chan->mode = l2cap_select_mode(rfc.mode,
3576 chan->conn->feat_mask);
3577 break;
3578 }
3579
3580 if (remote_efs) {
3581 if (__l2cap_efs_supported(chan->conn))
3582 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3583 else
3584 return -ECONNREFUSED;
3585 }
3586
3587 if (chan->mode != rfc.mode)
3588 return -ECONNREFUSED;
3589
3590 break;
3591 }
3592
3593 done:
3594 if (chan->mode != rfc.mode) {
3595 result = L2CAP_CONF_UNACCEPT;
3596 rfc.mode = chan->mode;
3597
3598 if (chan->num_conf_rsp == 1)
3599 return -ECONNREFUSED;
3600
3601 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3602 (unsigned long) &rfc);
3603 }
3604
3605 if (result == L2CAP_CONF_SUCCESS) {
3606 /* Configure output options and let the other side know
3607 * which ones we don't like. */
3608
3609 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3610 result = L2CAP_CONF_UNACCEPT;
3611 else {
3612 chan->omtu = mtu;
3613 set_bit(CONF_MTU_DONE, &chan->conf_state);
3614 }
3615 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3616
3617 if (remote_efs) {
3618 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3619 efs.stype != L2CAP_SERV_NOTRAFIC &&
3620 efs.stype != chan->local_stype) {
3621
3622 result = L2CAP_CONF_UNACCEPT;
3623
3624 if (chan->num_conf_req >= 1)
3625 return -ECONNREFUSED;
3626
3627 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3628 sizeof(efs),
3629 (unsigned long) &efs);
3630 } else {
3631 /* Send PENDING Conf Rsp */
3632 result = L2CAP_CONF_PENDING;
3633 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3634 }
3635 }
3636
3637 switch (rfc.mode) {
3638 case L2CAP_MODE_BASIC:
3639 chan->fcs = L2CAP_FCS_NONE;
3640 set_bit(CONF_MODE_DONE, &chan->conf_state);
3641 break;
3642
3643 case L2CAP_MODE_ERTM:
3644 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3645 chan->remote_tx_win = rfc.txwin_size;
3646 else
3647 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3648
3649 chan->remote_max_tx = rfc.max_transmit;
3650
3651 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3652 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3653 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3654 rfc.max_pdu_size = cpu_to_le16(size);
3655 chan->remote_mps = size;
3656
3657 __l2cap_set_ertm_timeouts(chan, &rfc);
3658
3659 set_bit(CONF_MODE_DONE, &chan->conf_state);
3660
3661 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3662 sizeof(rfc), (unsigned long) &rfc);
3663
3664 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3665 chan->remote_id = efs.id;
3666 chan->remote_stype = efs.stype;
3667 chan->remote_msdu = le16_to_cpu(efs.msdu);
3668 chan->remote_flush_to =
3669 le32_to_cpu(efs.flush_to);
3670 chan->remote_acc_lat =
3671 le32_to_cpu(efs.acc_lat);
3672 chan->remote_sdu_itime =
3673 le32_to_cpu(efs.sdu_itime);
3674 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3675 sizeof(efs),
3676 (unsigned long) &efs);
3677 }
3678 break;
3679
3680 case L2CAP_MODE_STREAMING:
3681 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3682 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3683 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3684 rfc.max_pdu_size = cpu_to_le16(size);
3685 chan->remote_mps = size;
3686
3687 set_bit(CONF_MODE_DONE, &chan->conf_state);
3688
3689 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3690 (unsigned long) &rfc);
3691
3692 break;
3693
3694 default:
3695 result = L2CAP_CONF_UNACCEPT;
3696
3697 memset(&rfc, 0, sizeof(rfc));
3698 rfc.mode = chan->mode;
3699 }
3700
3701 if (result == L2CAP_CONF_SUCCESS)
3702 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3703 }
3704 rsp->scid = cpu_to_le16(chan->dcid);
3705 rsp->result = cpu_to_le16(result);
3706 rsp->flags = __constant_cpu_to_le16(0);
3707
3708 return ptr - data;
3709 }
3710
3711 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3712 void *data, u16 *result)
3713 {
3714 struct l2cap_conf_req *req = data;
3715 void *ptr = req->data;
3716 int type, olen;
3717 unsigned long val;
3718 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3719 struct l2cap_conf_efs efs;
3720
3721 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3722
3723 while (len >= L2CAP_CONF_OPT_SIZE) {
3724 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3725
3726 switch (type) {
3727 case L2CAP_CONF_MTU:
3728 if (val < L2CAP_DEFAULT_MIN_MTU) {
3729 *result = L2CAP_CONF_UNACCEPT;
3730 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3731 } else
3732 chan->imtu = val;
3733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3734 break;
3735
3736 case L2CAP_CONF_FLUSH_TO:
3737 chan->flush_to = val;
3738 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3739 2, chan->flush_to);
3740 break;
3741
3742 case L2CAP_CONF_RFC:
3743 if (olen == sizeof(rfc))
3744 memcpy(&rfc, (void *)val, olen);
3745
3746 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3747 rfc.mode != chan->mode)
3748 return -ECONNREFUSED;
3749
3750 chan->fcs = 0;
3751
3752 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3753 sizeof(rfc), (unsigned long) &rfc);
3754 break;
3755
3756 case L2CAP_CONF_EWS:
3757 chan->ack_win = min_t(u16, val, chan->ack_win);
3758 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3759 chan->tx_win);
3760 break;
3761
3762 case L2CAP_CONF_EFS:
3763 if (olen == sizeof(efs))
3764 memcpy(&efs, (void *)val, olen);
3765
3766 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3767 efs.stype != L2CAP_SERV_NOTRAFIC &&
3768 efs.stype != chan->local_stype)
3769 return -ECONNREFUSED;
3770
3771 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3772 (unsigned long) &efs);
3773 break;
3774
3775 case L2CAP_CONF_FCS:
3776 if (*result == L2CAP_CONF_PENDING)
3777 if (val == L2CAP_FCS_NONE)
3778 set_bit(CONF_RECV_NO_FCS,
3779 &chan->conf_state);
3780 break;
3781 }
3782 }
3783
3784 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3785 return -ECONNREFUSED;
3786
3787 chan->mode = rfc.mode;
3788
3789 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3790 switch (rfc.mode) {
3791 case L2CAP_MODE_ERTM:
3792 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3793 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3794 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3795 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3796 chan->ack_win = min_t(u16, chan->ack_win,
3797 rfc.txwin_size);
3798
3799 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3800 chan->local_msdu = le16_to_cpu(efs.msdu);
3801 chan->local_sdu_itime =
3802 le32_to_cpu(efs.sdu_itime);
3803 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3804 chan->local_flush_to =
3805 le32_to_cpu(efs.flush_to);
3806 }
3807 break;
3808
3809 case L2CAP_MODE_STREAMING:
3810 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3811 }
3812 }
3813
3814 req->dcid = cpu_to_le16(chan->dcid);
3815 req->flags = __constant_cpu_to_le16(0);
3816
3817 return ptr - data;
3818 }
3819
3820 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3821 u16 result, u16 flags)
3822 {
3823 struct l2cap_conf_rsp *rsp = data;
3824 void *ptr = rsp->data;
3825
3826 BT_DBG("chan %p", chan);
3827
3828 rsp->scid = cpu_to_le16(chan->dcid);
3829 rsp->result = cpu_to_le16(result);
3830 rsp->flags = cpu_to_le16(flags);
3831
3832 return ptr - data;
3833 }
3834
3835 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3836 {
3837 struct l2cap_le_conn_rsp rsp;
3838 struct l2cap_conn *conn = chan->conn;
3839
3840 BT_DBG("chan %p", chan);
3841
3842 rsp.dcid = cpu_to_le16(chan->scid);
3843 rsp.mtu = cpu_to_le16(chan->imtu);
3844 rsp.mps = cpu_to_le16(chan->mps);
3845 rsp.credits = cpu_to_le16(chan->rx_credits);
3846 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3847
3848 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3849 &rsp);
3850 }
3851
3852 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3853 {
3854 struct l2cap_conn_rsp rsp;
3855 struct l2cap_conn *conn = chan->conn;
3856 u8 buf[128];
3857 u8 rsp_code;
3858
3859 rsp.scid = cpu_to_le16(chan->dcid);
3860 rsp.dcid = cpu_to_le16(chan->scid);
3861 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3862 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3863
3864 if (chan->hs_hcon)
3865 rsp_code = L2CAP_CREATE_CHAN_RSP;
3866 else
3867 rsp_code = L2CAP_CONN_RSP;
3868
3869 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3870
3871 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3872
3873 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3874 return;
3875
3876 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3877 l2cap_build_conf_req(chan, buf), buf);
3878 chan->num_conf_req++;
3879 }
3880
/* Extract the RFC (and extended window size) options from a successful
 * Configure Response and apply the negotiated ERTM/streaming parameters
 * (timeouts, MPS, ack window) to the channel.  No-op for other modes.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode carry RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Only accept an RFC option of the expected size */
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS option
		 * rather than the RFC txwin_size field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3931
3932 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3933 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3934 u8 *data)
3935 {
3936 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3937
3938 if (cmd_len < sizeof(*rej))
3939 return -EPROTO;
3940
3941 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3942 return 0;
3943
3944 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3945 cmd->ident == conn->info_ident) {
3946 cancel_delayed_work(&conn->info_timer);
3947
3948 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3949 conn->info_ident = 0;
3950
3951 l2cap_conn_start(conn);
3952 }
3953
3954 return 0;
3955 }
3956
/* Handle an incoming Connection Request (or Create Channel Request).
 *
 * Looks up a listening channel for the requested PSM, spawns a new
 * channel from it and answers with @rsp_code.  @amp_id identifies the
 * local controller the channel is created on (AMP_ID_BREDR for a plain
 * BR/EDR connection).  May additionally trigger the feature-mask
 * Information Request exchange and/or the first Configure Request.
 *
 * Returns the newly created channel, or NULL if none was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Inherit addressing from the underlying ACL link */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Only run security checks once the feature-mask exchange is done;
	 * otherwise answer "pending" and request the features below.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Pending with no info yet: ask the peer for its feature mask */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4090
4091 static int l2cap_connect_req(struct l2cap_conn *conn,
4092 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4093 {
4094 struct hci_dev *hdev = conn->hcon->hdev;
4095 struct hci_conn *hcon = conn->hcon;
4096
4097 if (cmd_len < sizeof(struct l2cap_conn_req))
4098 return -EPROTO;
4099
4100 hci_dev_lock(hdev);
4101 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4102 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4103 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4104 hcon->dst_type, 0, NULL, 0,
4105 hcon->dev_class);
4106 hci_dev_unlock(hdev);
4107
4108 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4109 return 0;
4110 }
4111
/* Handle a Connection Response or Create Channel Response.
 *
 * Finds our channel by the echoed source CID, or by the command ident
 * when no source CID was assigned yet, then moves it to BT_CONFIG and
 * starts configuration on success, keeps it pending, or deletes it on
 * any other result.  Returns 0, or -EBADSLT if no channel matches.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* Look up by scid if present, otherwise by request ident */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the first Configure Request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result means the connection was refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4184
4185 static inline void set_default_fcs(struct l2cap_chan *chan)
4186 {
4187 /* FCS is enabled only in ERTM or streaming mode, if one or both
4188 * sides request it.
4189 */
4190 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4191 chan->fcs = L2CAP_FCS_NONE;
4192 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4193 chan->fcs = L2CAP_FCS_CRC16;
4194 }
4195
4196 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4197 u8 ident, u16 flags)
4198 {
4199 struct l2cap_conn *conn = chan->conn;
4200
4201 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4202 flags);
4203
4204 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4205 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4206
4207 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4208 l2cap_build_conf_rsp(chan, data,
4209 L2CAP_CONF_SUCCESS, flags), data);
4210 }
4211
4212 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4213 u16 scid, u16 dcid)
4214 {
4215 struct l2cap_cmd_rej_cid rej;
4216
4217 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4218 rej.scid = __cpu_to_le16(scid);
4219 rej.dcid = __cpu_to_le16(dcid);
4220
4221 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4222 }
4223
/* Handle a Configuration Request for one of our channels.
 *
 * Continuation fragments are accumulated in chan->conf_req; once the
 * request is complete it is parsed, answered with a Configuration
 * Response, and the channel is brought up when both directions have
 * finished configuring.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): the matching unlock below implies this returns the
	 * channel locked - confirm against l2cap_get_chan_by_scid().
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize FCS/ERTM and go ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4332
/* Handle a Configuration Response from the peer.
 *
 * SUCCESS applies the negotiated RFC options; PENDING may answer with
 * our own pending response; UNACCEPT re-sends an adjusted Configure
 * Request up to L2CAP_CONF_MAX_CONF_RSP times; anything else (or too
 * many rejections) tears the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers directly; high-speed channels wait
			 * for the logical link before responding.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through to the default case when we have already
		 * exhausted the allowed number of config responses
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finalize FCS/ERTM and go ready */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4444
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, then shut down and delete the targeted channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference across del/close so the channel cannot be
	 * freed before ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4491
/* Handle a Disconnection Response: the peer confirmed our disconnect,
 * so delete and close the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference across del/close so the channel cannot be
	 * freed before ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4530
/* Handle an Information Request: answer with our feature mask, our
 * fixed channel map, or "not supported" for unknown types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled by module
		 * parameter, and the extended features when high speed is
		 * enabled.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* The A2MP bit in the (global) fixed channel map tracks
		 * whether high speed is currently enabled.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4585
/* Handle an Information Response: record the peer's feature mask and
 * fixed channel map.  After the feature mask, a fixed-channel query may
 * follow; once the exchange is finished, pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* On failure, finish the exchange with whatever we have */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, query the map next;
		 * otherwise the exchange is complete.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4648
/* Handle a Create Channel Request (high-speed channel setup).
 *
 * amp_id 0 (AMP_ID_BREDR) falls back to a normal BR/EDR connect.  For a
 * real AMP controller id, the controller is validated, the channel is
 * created via l2cap_connect() and bound to the AMP logical link.  An
 * invalid controller id is answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4725
4726 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4727 {
4728 struct l2cap_move_chan_req req;
4729 u8 ident;
4730
4731 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4732
4733 ident = l2cap_get_ident(chan->conn);
4734 chan->ident = ident;
4735
4736 req.icid = cpu_to_le16(chan->scid);
4737 req.dest_amp_id = dest_amp_id;
4738
4739 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4740 &req);
4741
4742 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4743 }
4744
4745 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4746 {
4747 struct l2cap_move_chan_rsp rsp;
4748
4749 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4750
4751 rsp.icid = cpu_to_le16(chan->dcid);
4752 rsp.result = cpu_to_le16(result);
4753
4754 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4755 sizeof(rsp), &rsp);
4756 }
4757
4758 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4759 {
4760 struct l2cap_move_chan_cfm cfm;
4761
4762 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4763
4764 chan->ident = l2cap_get_ident(chan->conn);
4765
4766 cfm.icid = cpu_to_le16(chan->scid);
4767 cfm.result = cpu_to_le16(result);
4768
4769 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4770 sizeof(cfm), &cfm);
4771
4772 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4773 }
4774
4775 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4776 {
4777 struct l2cap_move_chan_cfm cfm;
4778
4779 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4780
4781 cfm.icid = cpu_to_le16(icid);
4782 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4783
4784 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4785 sizeof(cfm), &cfm);
4786 }
4787
4788 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4789 u16 icid)
4790 {
4791 struct l2cap_move_chan_cfm_rsp rsp;
4792
4793 BT_DBG("icid 0x%4.4x", icid);
4794
4795 rsp.icid = cpu_to_le16(icid);
4796 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4797 }
4798
4799 static void __release_logical_link(struct l2cap_chan *chan)
4800 {
4801 chan->hs_hchan = NULL;
4802 chan->hs_hcon = NULL;
4803
4804 /* Placeholder - release the logical link */
4805 }
4806
/* Handle a failed logical link setup for chan.
 *
 * A channel that is not yet connected was being created directly on the
 * AMP, so it is simply torn down.  An established channel was being
 * moved; abort the move according to our role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Finish our side of the move, then refuse the request */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4837
/* Logical link is up for a channel being created on an AMP: attach the
 * hci_chan, send the deferred EFS configure response, and bring the
 * channel up if configuration has already completed on both sides.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Configure response was held back until the link came up */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4860
/* Logical link is up for a channel being moved: advance the move state
 * machine depending on what we are still waiting for and which role we
 * play in the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local receive side is unblocked */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4894
/* Completion callback for logical link setup (channel create or move).
 *
 * Call with chan locked.  @status is zero on success; on failure both
 * the pending operation and the logical link references are cleaned up.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4915
4916 void l2cap_move_start(struct l2cap_chan *chan)
4917 {
4918 BT_DBG("chan %p", chan);
4919
4920 if (chan->local_amp_id == AMP_ID_BREDR) {
4921 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4922 return;
4923 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4924 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4925 /* Placeholder - start physical link setup */
4926 } else {
4927 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4928 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4929 chan->move_id = 0;
4930 l2cap_move_setup(chan);
4931 l2cap_send_move_chan_req(chan, 0);
4932 }
4933 }
4934
/* Continue channel creation once the physical link attempt finished.
 *
 * For an outgoing channel (BT_CONNECT) either proceed with the AMP
 * create-channel request or fall back to a plain BR/EDR connect.  For
 * an incoming channel, send the deferred Create Channel Response and,
 * on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration right away */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4986
4987 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4988 u8 remote_amp_id)
4989 {
4990 l2cap_move_setup(chan);
4991 chan->move_id = local_amp_id;
4992 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4993
4994 l2cap_send_move_chan_req(chan, remote_amp_id);
4995 }
4996
/* As move responder, answer the peer's request once the physical link
 * outcome is known, depending on whether a logical link is available.
 *
 * NOTE(review): hchan stays NULL until the placeholder below is filled
 * in, so currently only the not-allowed branch can run.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5021
5022 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5023 {
5024 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5025 u8 rsp_result;
5026 if (result == -EINVAL)
5027 rsp_result = L2CAP_MR_BAD_ID;
5028 else
5029 rsp_result = L2CAP_MR_NOT_ALLOWED;
5030
5031 l2cap_send_move_chan_rsp(chan, rsp_result);
5032 }
5033
5034 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5035 chan->move_state = L2CAP_MOVE_STABLE;
5036
5037 /* Restart data transmission */
5038 l2cap_ertm_send(chan);
5039 }
5040
/* Completion callback for physical link setup (channel create or move).
 *
 * Invoke with locked chan.
 *
 * NOTE(review): the channel lock is released here only on the early
 * BT_DISCONN/BT_CLOSED path; every other path returns with chan still
 * locked — confirm all callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel still being created on the AMP */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5074
/* Handle an incoming Move Channel Request.
 *
 * Validates the request (channel exists and is movable, destination
 * controller is usable, no losing move collision), then either rejects
 * it or becomes move responder and answers with success/pending.
 *
 * Returns 0, or a negative error that makes the caller send a command
 * reject.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* On success this returns with chan locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No channel: respond directly, nothing to unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels not pinned to BR/EDR
	 * may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5172
/* Process a success/pending Move Channel Response as initiator and
 * advance the move state machine; icid identifies the moving channel.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* On success this returns with chan locked */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No channel: confirm "unconfirmed" by icid alone */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer asked for more time: use the extended timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5262
/* Process a failed Move Channel Response; on a collision loss the
 * channel switches to responder, otherwise the move is cancelled.
 * Always confirms "unconfirmed" to the peer.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* On success this returns with chan locked */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* We lost the collision; await the peer's request */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5291
5292 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5293 struct l2cap_cmd_hdr *cmd,
5294 u16 cmd_len, void *data)
5295 {
5296 struct l2cap_move_chan_rsp *rsp = data;
5297 u16 icid, result;
5298
5299 if (cmd_len != sizeof(*rsp))
5300 return -EPROTO;
5301
5302 icid = le16_to_cpu(rsp->icid);
5303 result = le16_to_cpu(rsp->result);
5304
5305 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5306
5307 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5308 l2cap_move_continue(conn, icid, result);
5309 else
5310 l2cap_move_fail(conn, cmd->ident, icid, result);
5311
5312 return 0;
5313 }
5314
/* Handle an incoming Move Channel Confirmation as responder: commit or
 * roll back the controller switch, then always acknowledge with a
 * confirmation response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* On success this returns with chan locked */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the move to the new controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Peer did not confirm; stay where we are */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5356
/* Handle the Move Channel Confirmation Response as initiator: the move
 * transaction is complete, so commit the controller switch and release
 * the old logical link when moving back to BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* On success this returns with chan locked */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5391
5392 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5393 u16 to_multiplier)
5394 {
5395 u16 max_latency;
5396
5397 if (min > max || min < 6 || max > 3200)
5398 return -EINVAL;
5399
5400 if (to_multiplier < 10 || to_multiplier > 3200)
5401 return -EINVAL;
5402
5403 if (max >= to_multiplier * 8)
5404 return -EINVAL;
5405
5406 max_latency = (to_multiplier * 8 / max) - 1;
5407 if (latency > 499 || latency > max_latency)
5408 return -EINVAL;
5409
5410 return 0;
5411 }
5412
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only valid when we are master.  Accept or reject based on parameter
 * validation; on accept, push the new parameters to the controller.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may act on this request */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err)
		/* Parameters accepted: apply them on the LE link */
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5454
/* Handle an LE Credit Based Connection Response for a channel we
 * initiated; on success store the peer's parameters and bring the
 * channel up, otherwise delete it.
 *
 * NOTE(review): length is checked with '<' while most handlers use
 * '!=' — confirm whether trailing bytes should be tolerated here.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* Spec minimum for both MTU and MPS is 23 octets */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Pair the response to the request via its ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5513
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns 0 on success; a negative error makes the caller send a
 * command reject for this ident.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the payload verbatim */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5593
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the PSM, checks security and
 * duplicate CIDs, creates the new channel, and answers with an LE
 * connect response (unless setup is deferred to userspace).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for both MTU and MPS is 23 octets */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Adopt addressing and the peer's channel parameters */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace will accept/reject; respond PEND for now */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5705
5706 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5707 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5708 u8 *data)
5709 {
5710 struct l2cap_le_credits *pkt;
5711 struct l2cap_chan *chan;
5712 u16 cid, credits;
5713
5714 if (cmd_len != sizeof(*pkt))
5715 return -EPROTO;
5716
5717 pkt = (struct l2cap_le_credits *) data;
5718 cid = __le16_to_cpu(pkt->cid);
5719 credits = __le16_to_cpu(pkt->credits);
5720
5721 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5722
5723 chan = l2cap_get_chan_by_dcid(conn, cid);
5724 if (!chan)
5725 return -EBADSLT;
5726
5727 chan->tx_credits += credits;
5728
5729 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5730 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5731 chan->tx_credits--;
5732 }
5733
5734 if (chan->tx_credits)
5735 chan->ops->resume(chan);
5736
5737 l2cap_chan_unlock(chan);
5738
5739 return 0;
5740 }
5741
5742 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5743 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5744 u8 *data)
5745 {
5746 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5747 struct l2cap_chan *chan;
5748
5749 if (cmd_len < sizeof(*rej))
5750 return -EPROTO;
5751
5752 mutex_lock(&conn->chan_lock);
5753
5754 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5755 if (!chan)
5756 goto done;
5757
5758 l2cap_chan_lock(chan);
5759 l2cap_chan_del(chan, ECONNREFUSED);
5760 l2cap_chan_unlock(chan);
5761
5762 done:
5763 mutex_unlock(&conn->chan_lock);
5764 return 0;
5765 }
5766
/* Dispatch one LE signaling command to its handler.
 *
 * When LE connection-oriented channels are disabled, the related
 * opcodes are rejected outright.  Returns 0 on success; a negative
 * error makes the caller send a command reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	if (!enable_lecoc) {
		switch (cmd->code) {
		case L2CAP_LE_CONN_REQ:
		case L2CAP_LE_CONN_RSP:
		case L2CAP_LE_CREDITS:
		case L2CAP_DISCONN_REQ:
		case L2CAP_DISCONN_RSP:
			return -EINVAL;
		}
	}

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5824
/* Process one skb from the LE signaling channel.
 *
 * The LE signaling channel carries exactly one command per PDU; on a
 * handler error a command reject is sent.  The skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Declared length must match the remainder; ident 0 is invalid */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): err can be any handler failure, not only
		 * a link-type problem — this message is misleading.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5865
/* Process one skb from the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may contain several commands back to back;
 * iterate over them, sending a command reject for each handler error.
 * The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Mirror raw signaling traffic to raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Command must fit in what is left; ident 0 is invalid */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): err can be any handler failure,
			 * not only a link-type problem.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5914
/* Verify and strip the frame check sequence on a received ERTM or
 * streaming-mode frame.
 *
 * Returns 0 when the FCS matches (or FCS is not in use), -EBADMSG on a
 * mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS field off first; the trimmed bytes remain
		 * readable just past the new skb->len.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* CRC covers the L2CAP header in front of skb->data too */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5935
/* Answer a poll from the peer: send the F-bit in an RNR (when locally
 * busy), in a pending i-frame, or in an RR as a last resort.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer just told us it is no longer busy: restart the
	 * retransmission timer if frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5969
5970 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5971 struct sk_buff **last_frag)
5972 {
5973 /* skb->len reflects data in skb as well as all fragments
5974 * skb->data_len reflects only data in fragments
5975 */
5976 if (!skb_has_frag_list(skb))
5977 skb_shinfo(skb)->frag_list = new_frag;
5978
5979 new_frag->next = NULL;
5980
5981 (*last_frag)->next = new_frag;
5982 *last_frag = new_frag;
5983
5984 skb->len += new_frag->len;
5985 skb->data_len += new_frag->len;
5986 skb->truesize += new_frag->truesize;
5987 }
5988
/* Feed one I-frame into SDU reassembly, driven by its SAR bits.
 *
 * On success the skb (or the completed SDU) is consumed. On error both
 * the frame and any partially assembled SDU are freed. Returns 0 on
 * success, -EINVAL on a SAR sequence violation, -EMSGSIZE if the SDU
 * would exceed the incoming MTU, or the recv callback's result.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment may not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership transferred to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe on paths
		 * where ownership was already moved to chan->sdu.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6070
/* Re-segment queued outgoing frames after the connection MTU changed
 * (called from the channel-move paths). Currently a no-op.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6076
6077 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6078 {
6079 u8 event;
6080
6081 if (chan->mode != L2CAP_MODE_ERTM)
6082 return;
6083
6084 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6085 l2cap_tx(chan, NULL, NULL, event);
6086 }
6087
/* Drain the out-of-order queue built up during SREJ recovery: feed
 * consecutive frames to reassembly until a gap (or local busy) stops
 * us, and return to the normal RX state once the queue empties.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap in the sequence: wait for more retransmissions */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All missing frames arrived: back to normal receive */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6121
/* Handle a received SREJ S-frame: the peer selectively requests
 * retransmission of the single I-frame numbered reqseq, with the
 * P/F bits bookkept per the ERTM state tables.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer may not request a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* Give up on the link once the retry limit is reached */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: process the ack, retransmit the requested frame
		 * and answer the poll with an F-bit.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F=1 SREJ merely
			 * answers the SREJ we already acted on for the
			 * same sequence number.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6179
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all unacked I-frames starting at reqseq.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer may not reject a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Give up on the link once the retry limit is reached */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answers our poll; avoid a duplicate retransmit
		 * if a REJ was already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6216
/* Classify an incoming I-frame's txseq relative to the receive window
 * and any outstanding SREJ state. The returned L2CAP_TXSEQ_* value
 * drives the RX state machines (expected / duplicate / unexpected /
 * invalid, each with SREJ variants).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6302
/* ERTM RX state machine: normal receive state (no SREJ outstanding).
 * Any skb that is not stored or consumed is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	/* Set when the skb's ownership moves to a queue or reassembly */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F=1 answers our poll; retransmit
				 * unless a REJ already did.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only process the piggybacked ack */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			/* P=1 RR: answer with an F=1 frame */
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Peer left busy state with frames still unacked:
			 * restart the retransmission timer.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6436
/* ERTM RX state machine: SREJ-sent state. Incoming I-frames are held
 * in srej_q until the missing frames arrive; the state returns to
 * RECV via l2cap_rx_queued_iframes() once the queue drains.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	/* Set when the skb's ownership moves to srej_q */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* A requested retransmission arrived in order:
			 * queue it and try to flush the srej queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by repeating the tail SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR without repeating an SREJ */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6579
6580 static int l2cap_finish_move(struct l2cap_chan *chan)
6581 {
6582 BT_DBG("chan %p", chan);
6583
6584 chan->rx_state = L2CAP_RX_STATE_RECV;
6585
6586 if (chan->hs_hcon)
6587 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6588 else
6589 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6590
6591 return l2cap_resegment(chan);
6592 }
6593
/* RX state waiting for a P=1 poll from the peer (part of the channel
 * move sequence -- note the l2cap_finish_move() call below; confirm
 * against the move-channel code paths). Rewinds the TX side to the
 * peer's reqseq and answers the poll with an F=1 frame.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll can take us out of WAIT_P */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Process the triggering S-frame in the normal RECV state */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6631
/* RX state waiting for an F=1 frame answering our poll. Rewinds the
 * TX side to the peer's reqseq, adopts the current link's MTU, then
 * re-processes the frame in the normal RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only an F=1 frame can take us out of WAIT_F */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the link the channel now runs over */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6669
6670 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6671 {
6672 /* Make sure reqseq is for a packet that has been sent but not acked */
6673 u16 unacked;
6674
6675 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6676 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6677 }
6678
6679 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6680 struct sk_buff *skb, u8 event)
6681 {
6682 int err = 0;
6683
6684 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6685 control, skb, event, chan->rx_state);
6686
6687 if (__valid_reqseq(chan, control->reqseq)) {
6688 switch (chan->rx_state) {
6689 case L2CAP_RX_STATE_RECV:
6690 err = l2cap_rx_state_recv(chan, control, skb, event);
6691 break;
6692 case L2CAP_RX_STATE_SREJ_SENT:
6693 err = l2cap_rx_state_srej_sent(chan, control, skb,
6694 event);
6695 break;
6696 case L2CAP_RX_STATE_WAIT_P:
6697 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6698 break;
6699 case L2CAP_RX_STATE_WAIT_F:
6700 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6701 break;
6702 default:
6703 /* shut it down */
6704 break;
6705 }
6706 } else {
6707 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6708 control->reqseq, chan->next_tx_seq,
6709 chan->expected_ack_seq);
6710 l2cap_send_disconn_req(chan, ECONNRESET);
6711 }
6712
6713 return err;
6714 }
6715
6716 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6717 struct sk_buff *skb)
6718 {
6719 int err = 0;
6720
6721 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6722 chan->rx_state);
6723
6724 if (l2cap_classify_txseq(chan, control->txseq) ==
6725 L2CAP_TXSEQ_EXPECTED) {
6726 l2cap_pass_to_tx(chan, control);
6727
6728 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6729 __next_seq(chan, chan->buffer_seq));
6730
6731 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6732
6733 l2cap_reassemble_sdu(chan, skb, control);
6734 } else {
6735 if (chan->sdu) {
6736 kfree_skb(chan->sdu);
6737 chan->sdu = NULL;
6738 }
6739 chan->sdu_last_frag = NULL;
6740 chan->sdu_len = 0;
6741
6742 if (skb) {
6743 BT_DBG("Freeing %p", skb);
6744 kfree_skb(skb);
6745 }
6746 }
6747
6748 chan->last_acked_seq = control->txseq;
6749 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6750
6751 return err;
6752 }
6753
/* Entry point for ERTM/streaming PDUs on a connected channel: verify
 * the FCS and length fields, then dispatch I-frames and S-frames into
 * the appropriate state machine. The skb is consumed on every path;
 * always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude SDU-length and FCS fields from the payload length */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-field straight to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6841
6842 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6843 {
6844 struct l2cap_conn *conn = chan->conn;
6845 struct l2cap_le_credits pkt;
6846 u16 return_credits;
6847
6848 /* We return more credits to the sender only after the amount of
6849 * credits falls below half of the initial amount.
6850 */
6851 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6852 return;
6853
6854 return_credits = le_max_credits - chan->rx_credits;
6855
6856 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6857
6858 chan->rx_credits += return_credits;
6859
6860 pkt.cid = cpu_to_le16(chan->scid);
6861 pkt.credits = cpu_to_le16(return_credits);
6862
6863 chan->ident = l2cap_get_ident(conn);
6864
6865 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6866 }
6867
/* Receive one LE flow-control (credit-based) PDU and run SDU
 * reassembly.
 *
 * Errors detected before the skb is consumed (no credits left, PDU
 * bigger than the MTU) return -ENOBUFS and leave the skb to the
 * caller. Once reassembly has started, the function always returns 0
 * and frees the skb itself on failure (see the comment at the end).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer must not send beyond the credits we granted */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Replenish the peer's credits if we are running low */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver it directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Start reassembly; skb ownership moves to chan->sdu */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: hand it up the stack */
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb(NULL) is a no-op, so both frees are safe */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6956
/* Route a data PDU to the channel identified by @cid and dispatch it
 * by channel mode. The skb is consumed on every path.
 *
 * NOTE(review): this function only unlocks the channel (at done), so
 * l2cap_get_chan_by_scid presumably returns it already locked --
 * confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* First A2MP frame: create the channel on demand */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* On error the skb has not been consumed; drop it here */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
7021
/* Deliver a connectionless (G-frame) PDU to the global channel bound
 * to @psm, if any. The skb is freed unless the channel's recv
 * callback takes ownership.
 *
 * NOTE(review): if l2cap_global_chan_by_psm takes a channel reference,
 * a matching l2cap_chan_put would be missing on these paths -- confirm
 * against its definition.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data only exists on BR/EDR links */
	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	/* recv returning 0 means the callback took ownership of skb */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
7054
/* Deliver an ATT PDU (fixed CID) to the global ATT channel matching
 * this LE link's addresses. The skb is freed unless the channel's
 * recv callback takes ownership.
 *
 * NOTE(review): if l2cap_global_chan_by_scid takes a channel
 * reference, a matching l2cap_chan_put would be missing here --
 * confirm against its definition.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* ATT over the fixed channel only exists on LE links */
	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* Ignore traffic from blacklisted remote devices */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv returning 0 means the callback took ownership of skb */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
7083
7084 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7085 {
7086 struct l2cap_hdr *lh = (void *) skb->data;
7087 u16 cid, len;
7088 __le16 psm;
7089
7090 skb_pull(skb, L2CAP_HDR_SIZE);
7091 cid = __le16_to_cpu(lh->cid);
7092 len = __le16_to_cpu(lh->len);
7093
7094 if (len != skb->len) {
7095 kfree_skb(skb);
7096 return;
7097 }
7098
7099 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7100
7101 switch (cid) {
7102 case L2CAP_CID_SIGNALING:
7103 l2cap_sig_channel(conn, skb);
7104 break;
7105
7106 case L2CAP_CID_CONN_LESS:
7107 psm = get_unaligned((__le16 *) skb->data);
7108 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7109 l2cap_conless_channel(conn, psm, skb);
7110 break;
7111
7112 case L2CAP_CID_ATT:
7113 l2cap_att_channel(conn, skb);
7114 break;
7115
7116 case L2CAP_CID_LE_SIGNALING:
7117 l2cap_le_sig_channel(conn, skb);
7118 break;
7119
7120 case L2CAP_CID_SMP:
7121 if (smp_sig_channel(conn, skb))
7122 l2cap_conn_del(conn->hcon, EACCES);
7123 break;
7124
7125 case L2CAP_FC_6LOWPAN:
7126 bt_6lowpan_recv(conn, skb);
7127 break;
7128
7129 default:
7130 l2cap_data_channel(conn, cid, skb);
7131 break;
7132 }
7133 }
7134
7135 /* ---- L2CAP interface with lower layer (HCI) ---- */
7136
7137 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7138 {
7139 int exact = 0, lm1 = 0, lm2 = 0;
7140 struct l2cap_chan *c;
7141
7142 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7143
7144 /* Find listening sockets and check their link_mode */
7145 read_lock(&chan_list_lock);
7146 list_for_each_entry(c, &chan_list, global_l) {
7147 if (c->state != BT_LISTEN)
7148 continue;
7149
7150 if (!bacmp(&c->src, &hdev->bdaddr)) {
7151 lm1 |= HCI_LM_ACCEPT;
7152 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7153 lm1 |= HCI_LM_MASTER;
7154 exact++;
7155 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7156 lm2 |= HCI_LM_ACCEPT;
7157 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7158 lm2 |= HCI_LM_MASTER;
7159 }
7160 }
7161 read_unlock(&chan_list_lock);
7162
7163 return exact ? lm1 : lm2;
7164 }
7165
7166 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7167 {
7168 struct l2cap_conn *conn;
7169
7170 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7171
7172 if (!status) {
7173 conn = l2cap_conn_add(hcon);
7174 if (conn)
7175 l2cap_conn_ready(conn);
7176 } else {
7177 l2cap_conn_del(hcon, bt_to_errno(status));
7178 }
7179 }
7180
7181 int l2cap_disconn_ind(struct hci_conn *hcon)
7182 {
7183 struct l2cap_conn *conn = hcon->l2cap_data;
7184
7185 BT_DBG("hcon %p", hcon);
7186
7187 if (!conn)
7188 return HCI_ERROR_REMOTE_USER_TERM;
7189 return conn->disc_reason;
7190 }
7191
7192 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7193 {
7194 BT_DBG("hcon %p reason %d", hcon, reason);
7195
7196 bt_6lowpan_del_conn(hcon->l2cap_data);
7197
7198 l2cap_conn_del(hcon, bt_to_errno(reason));
7199 }
7200
7201 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7202 {
7203 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7204 return;
7205
7206 if (encrypt == 0x00) {
7207 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7208 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7209 } else if (chan->sec_level == BT_SECURITY_HIGH)
7210 l2cap_chan_close(chan, ECONNREFUSED);
7211 } else {
7212 if (chan->sec_level == BT_SECURITY_MEDIUM)
7213 __clear_chan_timer(chan);
7214 }
7215 }
7216
/* HCI callback: the authentication/encryption state of the link changed.
 *
 * @status:  HCI status of the security procedure (0 on success)
 * @encrypt: new encryption state of the link (0 = encryption off)
 *
 * Walks every channel on the connection and advances or tears down its
 * state machine according to the outcome.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption starts SMP key distribution
		 * and ends the SMP security timer either way.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels manage their own security. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT fixed channel has no L2CAP connect phase; it
		 * becomes ready as soon as the link is encrypted.
		 */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels that still have a connect request pending. */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already-established channels just resume and re-check
		 * the encryption requirement.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was gated on security: either
			 * proceed now or arm the disconnect timer.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was held for security: answer the
			 * pending Connect Request now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately kick off configuration
			 * if we have not sent a Config Request yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7321
/* HCI entry point: one ACL data packet arrived on this connection.
 *
 * An L2CAP frame may be split across several ACL packets; @flags says
 * whether @skb starts a new frame (ACL_START*) or continues one
 * (ACL_CONT).  Fragments are copied into conn->rx_skb until the length
 * announced in the Basic L2CAP header has been collected, then the
 * complete frame is handed to l2cap_recv_frame().
 *
 * Always consumes @skb (its data has either been copied or the packet
 * is dropped) and returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start packet while reassembly is still in progress means
		 * the previous frame was truncated: discard it and resync.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress: drop it. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Overlong fragment would overflow rx_skb: abort reassembly. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	/* The input skb has served its purpose (copied or rejected);
	 * successful fragment paths fall through here on purpose.
	 */
	kfree_skb(skb);
	return 0;
}
7426
7427 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7428 {
7429 struct l2cap_chan *c;
7430
7431 read_lock(&chan_list_lock);
7432
7433 list_for_each_entry(c, &chan_list, global_l) {
7434 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7435 &c->src, &c->dst,
7436 c->state, __le16_to_cpu(c->psm),
7437 c->scid, c->dcid, c->imtu, c->omtu,
7438 c->sec_level, c->mode);
7439 }
7440
7441 read_unlock(&chan_list_lock);
7442
7443 return 0;
7444 }
7445
/* debugfs open: bind the seq_file show routine to this file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7450
/* seq_file plumbing for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Handle of the "l2cap" debugfs file; removed again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
7459
7460 int __init l2cap_init(void)
7461 {
7462 int err;
7463
7464 err = l2cap_init_sockets();
7465 if (err < 0)
7466 return err;
7467
7468 if (IS_ERR_OR_NULL(bt_debugfs))
7469 return 0;
7470
7471 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7472 NULL, &l2cap_debugfs_fops);
7473
7474 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7475 &le_max_credits);
7476 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7477 &le_default_mps);
7478
7479 bt_6lowpan_init();
7480
7481 return 0;
7482 }
7483
/* Module teardown: undo l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	bt_6lowpan_cleanup();
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7490
/* "disable_ertm" module parameter, writable at load time and via sysfs
 * (mode 0644): disables enhanced retransmission mode globally.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");