]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/l2cap_core.c
dccp: defer ccid_hc_tx_delete() at dismantle time
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46
47 bool disable_ertm;
48
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 {
69 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77 }
78
/* Socket-level address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
83
/* Socket-level address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
88
89 /* ---- L2CAP channels ---- */
90
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
92 u16 cid)
93 {
94 struct l2cap_chan *c;
95
96 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->dcid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 u16 cid)
105 {
106 struct l2cap_chan *c;
107
108 list_for_each_entry(c, &conn->chan_l, list) {
109 if (c->scid == cid)
110 return c;
111 }
112 return NULL;
113 }
114
115 /* Find channel with given SCID.
116 * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 u16 cid)
119 {
120 struct l2cap_chan *c;
121
122 mutex_lock(&conn->chan_lock);
123 c = __l2cap_get_chan_by_scid(conn, cid);
124 if (c)
125 l2cap_chan_lock(c);
126 mutex_unlock(&conn->chan_lock);
127
128 return c;
129 }
130
131 /* Find channel with given DCID.
132 * Returns locked channel.
133 */
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
135 u16 cid)
136 {
137 struct l2cap_chan *c;
138
139 mutex_lock(&conn->chan_lock);
140 c = __l2cap_get_chan_by_dcid(conn, cid);
141 if (c)
142 l2cap_chan_lock(c);
143 mutex_unlock(&conn->chan_lock);
144
145 return c;
146 }
147
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 u8 ident)
150 {
151 struct l2cap_chan *c;
152
153 list_for_each_entry(c, &conn->chan_l, list) {
154 if (c->ident == ident)
155 return c;
156 }
157 return NULL;
158 }
159
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 u8 ident)
162 {
163 struct l2cap_chan *c;
164
165 mutex_lock(&conn->chan_lock);
166 c = __l2cap_get_chan_by_ident(conn, ident);
167 if (c)
168 l2cap_chan_lock(c);
169 mutex_unlock(&conn->chan_lock);
170
171 return c;
172 }
173
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
175 {
176 struct l2cap_chan *c;
177
178 list_for_each_entry(c, &chan_list, global_l) {
179 if (c->sport == psm && !bacmp(&c->src, src))
180 return c;
181 }
182 return NULL;
183 }
184
/* Bind @chan to PSM @psm on source address @src, or auto-allocate a
 * dynamic PSM when @psm is 0 (BR/EDR scans its dynamic range stepping
 * by 2, LE steps by 1).
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is already
 * bound on @src, or -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* err stays -EINVAL unless a free PSM is found below */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
228
/* Bind a channel to the fixed source CID @scid, switching the channel
 * type and MTU away from the connection-oriented defaults.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
243
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
245 {
246 u16 cid, dyn_end;
247
248 if (conn->hcon->type == LE_LINK)
249 dyn_end = L2CAP_CID_LE_DYN_END;
250 else
251 dyn_end = L2CAP_CID_DYN_END;
252
253 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 if (!__l2cap_get_chan_by_scid(conn, cid))
255 return cid;
256 }
257
258 return 0;
259 }
260
/* Move @chan to @state and notify the owner via the state_change op
 * (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
269
/* Move @chan to @state and report @err to the owner in the same
 * state_change callback.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
276
/* Report @err to the channel owner without changing the state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
281
/* Arm the ERTM retransmission timer, but only when the monitor timer
 * is not already pending and a retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
290
/* Stop the retransmission timer and arm the ERTM monitor timer when a
 * monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
299
/* Return the skb in @head whose ERTM tx sequence number equals @seq,
 * or NULL if no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
312
313 /* ---- L2CAP sequence number lists ---- */
314
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316 * SREJ requests that are received and for frames that are to be
317 * retransmitted. These seq_list functions implement a singly-linked
318 * list in an array, where membership in the list can also be checked
319 * in constant time. Items can also be added to the tail of the list
320 * and removed from the head in constant time, without further memory
321 * allocs or frees.
322 */
323
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
325 {
326 size_t alloc_size, i;
327
328 /* Allocated size is a power of 2 to map sequence numbers
329 * (which may be up to 14 bits) in to a smaller array that is
330 * sized for the negotiated ERTM transmit windows.
331 */
332 alloc_size = roundup_pow_of_two(size);
333
334 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
335 if (!seq_list->list)
336 return -ENOMEM;
337
338 seq_list->mask = alloc_size - 1;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 for (i = 0; i < alloc_size; i++)
342 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
343
344 return 0;
345 }
346
/* Release the backing array of a sequence-number list */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
351
/* True if @seq is currently a member of the list: a member's slot
 * holds either the next sequence number or the TAIL sentinel, never
 * L2CAP_SEQ_LIST_CLEAR.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
358
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 {
361 u16 seq = seq_list->head;
362 u16 mask = seq_list->mask;
363
364 seq_list->head = seq_list->list[seq & mask];
365 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
366
367 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
370 }
371
372 return seq;
373 }
374
/* Empty the list: clear every slot and reset the head/tail markers.
 * No-op when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
388
/* Append @seq to the tail of the list in constant time.  If @seq is
 * already a member (its slot is not CLEAR) the list is left unchanged.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
406
/* Delayed-work handler for the channel timer: close the channel with
 * an error code derived from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock is taken before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close is invoked without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): presumably balances the reference taken when the
	 * timer was scheduled (l2cap_set_timer) — confirm against header.
	 */
	l2cap_chan_put(chan);
}
436
/* Allocate and initialise a new channel, link it into the global
 * channel list and return it with an initial kref.  Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
468
/* kref release callback: unlink the channel from the global list and
 * free it.  Called only via l2cap_chan_put() on the final reference.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
481
/* Take an additional reference on the channel */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
488
/* Drop a channel reference; the last put frees the channel via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
496
/* Reset a channel's negotiable parameters (ERTM windows, timeouts,
 * FCS, security) to the protocol defaults prior to configuration.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
515
/* Initialise LE credit-based flow control state: no SDU in reassembly,
 * no tx credits until granted by the peer, rx credits and MPS taken
 * from the module-wide defaults (MPS capped at the channel's MTU).
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
527
/* Attach @chan to @conn, assigning CIDs and MTU based on the channel
 * type, and take a channel reference plus (usually) an hci_conn hold.
 * Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
579
/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
586
/* Detach @chan from its connection and tear it down; @err is the
 * reason passed to the owner's teardown callback.  Drops the channel
 * reference and hci_conn hold taken in __l2cap_chan_add(), and purges
 * per-mode queues/timers unless configuration never completed.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific state below is only valid once configuration
	 * completed (CONF_NOT_COMPLETE is cleared in l2cap_chan_ready).
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
657
/* Work handler: propagate an updated hci_conn destination address and
 * type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
676
677 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
678 {
679 struct l2cap_conn *conn = chan->conn;
680 struct l2cap_le_conn_rsp rsp;
681 u16 result;
682
683 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
684 result = L2CAP_CR_AUTHORIZATION;
685 else
686 result = L2CAP_CR_BAD_PSM;
687
688 l2cap_state_change(chan, BT_DISCONN);
689
690 rsp.dcid = cpu_to_le16(chan->scid);
691 rsp.mtu = cpu_to_le16(chan->imtu);
692 rsp.mps = cpu_to_le16(chan->mps);
693 rsp.credits = cpu_to_le16(chan->rx_credits);
694 rsp.result = cpu_to_le16(result);
695
696 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
697 &rsp);
698 }
699
700 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
701 {
702 struct l2cap_conn *conn = chan->conn;
703 struct l2cap_conn_rsp rsp;
704 u16 result;
705
706 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
707 result = L2CAP_CR_SEC_BLOCK;
708 else
709 result = L2CAP_CR_BAD_PSM;
710
711 l2cap_state_change(chan, BT_DISCONN);
712
713 rsp.scid = cpu_to_le16(chan->dcid);
714 rsp.dcid = cpu_to_le16(chan->scid);
715 rsp.result = cpu_to_le16(result);
716 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
717
718 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
719 }
720
/* State-dependent close: send a disconnect request or connect reject
 * where the protocol requires one, otherwise delete the channel
 * immediately.  Called with the channel lock held (see
 * l2cap_chan_timeout).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to answer the disconnect
			 * request before the channel timer fires.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
763
764 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
765 {
766 switch (chan->chan_type) {
767 case L2CAP_CHAN_RAW:
768 switch (chan->sec_level) {
769 case BT_SECURITY_HIGH:
770 case BT_SECURITY_FIPS:
771 return HCI_AT_DEDICATED_BONDING_MITM;
772 case BT_SECURITY_MEDIUM:
773 return HCI_AT_DEDICATED_BONDING;
774 default:
775 return HCI_AT_NO_BONDING;
776 }
777 break;
778 case L2CAP_CHAN_CONN_LESS:
779 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
780 if (chan->sec_level == BT_SECURITY_LOW)
781 chan->sec_level = BT_SECURITY_SDP;
782 }
783 if (chan->sec_level == BT_SECURITY_HIGH ||
784 chan->sec_level == BT_SECURITY_FIPS)
785 return HCI_AT_NO_BONDING_MITM;
786 else
787 return HCI_AT_NO_BONDING;
788 break;
789 case L2CAP_CHAN_CONN_ORIENTED:
790 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
791 if (chan->sec_level == BT_SECURITY_LOW)
792 chan->sec_level = BT_SECURITY_SDP;
793
794 if (chan->sec_level == BT_SECURITY_HIGH ||
795 chan->sec_level == BT_SECURITY_FIPS)
796 return HCI_AT_NO_BONDING_MITM;
797 else
798 return HCI_AT_NO_BONDING;
799 }
800 /* fall through */
801 default:
802 switch (chan->sec_level) {
803 case BT_SECURITY_HIGH:
804 case BT_SECURITY_FIPS:
805 return HCI_AT_GENERAL_BONDING_MITM;
806 case BT_SECURITY_MEDIUM:
807 return HCI_AT_GENERAL_BONDING;
808 default:
809 return HCI_AT_NO_BONDING;
810 }
811 break;
812 }
813 }
814
/* Service level security.
 * Elevate the link security to the channel's requested level: LE
 * links delegate to SMP, BR/EDR derives an HCI authentication
 * requirement from the channel type.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
829
/* Allocate the next signalling command identifier (wraps within
 * 1-128; 0 is never used).  Serialised by conn->ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
851
/* Build and transmit a signalling command PDU on @conn.  Silently
 * drops the command if the PDU cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic always goes out at maximum priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
876
877 static bool __chan_is_moving(struct l2cap_chan *chan)
878 {
879 return chan->move_state != L2CAP_MOVE_STABLE &&
880 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
881 }
882
/* Transmit one PDU for @chan, routing it to the high-speed (AMP)
 * logical link when one is attached and no move is in progress,
 * otherwise over the ACL link with appropriate flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* High-speed link expected but not present: drop */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
914
915 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
916 {
917 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
918 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
919
920 if (enh & L2CAP_CTRL_FRAME_TYPE) {
921 /* S-Frame */
922 control->sframe = 1;
923 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
924 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
925
926 control->sar = 0;
927 control->txseq = 0;
928 } else {
929 /* I-Frame */
930 control->sframe = 0;
931 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
932 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
933
934 control->poll = 0;
935 control->super = 0;
936 }
937 }
938
939 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
940 {
941 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
942 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
943
944 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
945 /* S-Frame */
946 control->sframe = 1;
947 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
948 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
949
950 control->sar = 0;
951 control->txseq = 0;
952 } else {
953 /* I-Frame */
954 control->sframe = 0;
955 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
956 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
957
958 control->poll = 0;
959 control->super = 0;
960 }
961 }
962
/* Pull and decode the control field at the front of @skb into the skb
 * control block, using the extended or enhanced layout depending on
 * the channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
976
977 static u32 __pack_extended_control(struct l2cap_ctrl *control)
978 {
979 u32 packed;
980
981 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
982 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
983
984 if (control->sframe) {
985 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
986 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
987 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
988 } else {
989 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
990 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
991 }
992
993 return packed;
994 }
995
996 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
997 {
998 u16 packed;
999
1000 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1001 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1002
1003 if (control->sframe) {
1004 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1005 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1006 packed |= L2CAP_CTRL_FRAME_TYPE;
1007 } else {
1008 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1009 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1010 }
1011
1012 return packed;
1013 }
1014
/* Write the encoded control field for @control into @skb immediately
 * after the basic L2CAP header, using the layout selected by
 * FLAG_EXT_CTRL.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1027
1028 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1029 {
1030 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1031 return L2CAP_EXT_HDR_SIZE;
1032 else
1033 return L2CAP_ENH_HDR_SIZE;
1034 }
1035
/* Build an S-frame PDU carrying the already-encoded @control field,
 * appending an FCS when the channel uses CRC16.  Returns the skb or
 * ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length is just control (+ FCS) */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1068
/* Encode and transmit the S-frame described by @control, updating the
 * F-bit, RNR-sent and ack bookkeeping as a side effect.  No-op for
 * I-frames or while an AMP move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame but SREJ acknowledges up to reqseq, so the pending
	 * ack timer can be cancelled.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1109
1110 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1111 {
1112 struct l2cap_ctrl control;
1113
1114 BT_DBG("chan %p, poll %d", chan, poll);
1115
1116 memset(&control, 0, sizeof(control));
1117 control.sframe = 1;
1118 control.poll = poll;
1119
1120 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1121 control.super = L2CAP_SUPER_RNR;
1122 else
1123 control.super = L2CAP_SUPER_RR;
1124
1125 control.reqseq = chan->buffer_seq;
1126 l2cap_send_sframe(chan, &control);
1127 }
1128
1129 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1130 {
1131 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1132 return true;
1133
1134 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1135 }
1136
/* True when the channel prefers AMP, both sides advertise the A2MP
 * fixed channel, and at least one non-BR/EDR AMP controller is up.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1164
/* Placeholder: EFS parameter validation is not implemented; every
 * configuration is currently accepted.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1170
1171 void l2cap_send_conn_req(struct l2cap_chan *chan)
1172 {
1173 struct l2cap_conn *conn = chan->conn;
1174 struct l2cap_conn_req req;
1175
1176 req.scid = cpu_to_le16(chan->scid);
1177 req.psm = chan->psm;
1178
1179 chan->ident = l2cap_get_ident(conn);
1180
1181 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1182
1183 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1184 }
1185
1186 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1187 {
1188 struct l2cap_create_chan_req req;
1189 req.scid = cpu_to_le16(chan->scid);
1190 req.psm = chan->psm;
1191 req.amp_id = amp_id;
1192
1193 chan->ident = l2cap_get_ident(chan->conn);
1194
1195 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1196 sizeof(req), &req);
1197 }
1198
/* Prepare an ERTM channel for a channel move: stop all ERTM timers,
 * reset retry bookkeeping, drop SREJ state and enter the MOVE rx state.
 * CONN_REMOTE_BUSY is set so no new I-frames are transmitted until the
 * move completes.  No-op for non-ERTM channels.
 */
1199 static void l2cap_move_setup(struct l2cap_chan *chan)
1200 {
1201 struct sk_buff *skb;
1202
1203 BT_DBG("chan %p", chan);
1204
1205 if (chan->mode != L2CAP_MODE_ERTM)
1206 return;
1207
1208 __clear_retrans_timer(chan);
1209 __clear_monitor_timer(chan);
1210 __clear_ack_timer(chan);
1211
1212 chan->retry_count = 0;
/* Frames already sent at least once are reset to a single retry; the
 * first never-sent frame (retries == 0) ends the walk.
 */
1213 skb_queue_walk(&chan->tx_q, skb) {
1214 if (bt_cb(skb)->l2cap.retries)
1215 bt_cb(skb)->l2cap.retries = 1;
1216 else
1217 break;
1218 }
1219
1220 chan->expected_tx_seq = chan->buffer_seq;
1221
1222 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1223 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1224 l2cap_seq_list_clear(&chan->retrans_list);
1225 l2cap_seq_list_clear(&chan->srej_list);
1226 skb_queue_purge(&chan->srej_q);
1227
1228 chan->tx_state = L2CAP_TX_STATE_XMIT;
1229 chan->rx_state = L2CAP_RX_STATE_MOVE;
1230
1231 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1232 }
1233
1234 static void l2cap_move_done(struct l2cap_chan *chan)
1235 {
1236 u8 move_role = chan->move_role;
1237 BT_DBG("chan %p", chan);
1238
1239 chan->move_state = L2CAP_MOVE_STABLE;
1240 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1241
1242 if (chan->mode != L2CAP_MODE_ERTM)
1243 return;
1244
1245 switch (move_role) {
1246 case L2CAP_MOVE_ROLE_INITIATOR:
1247 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1248 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1249 break;
1250 case L2CAP_MOVE_ROLE_RESPONDER:
1251 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1252 break;
1253 }
1254 }
1255
/* Transition @chan to BT_CONNECTED and notify its owner via ->ready().
 * Clears all configuration flags and the channel timer.  An LE
 * flow-control channel with no tx credits is suspended before being
 * marked connected so the owner does not start sending.
 */
1256 static void l2cap_chan_ready(struct l2cap_chan *chan)
1257 {
1258 /* The channel may have already been flagged as connected in
1259 * case of receiving data before the L2CAP info req/rsp
1260 * procedure is complete.
1261 */
1262 if (chan->state == BT_CONNECTED)
1263 return;
1264
1265 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1266 chan->conf_state = 0;
1267 __clear_chan_timer(chan);
1268
1269 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1270 chan->ops->suspend(chan);
1271
1272 chan->state = BT_CONNECTED;
1273
1274 chan->ops->ready(chan);
1275 }
1276
1277 static void l2cap_le_connect(struct l2cap_chan *chan)
1278 {
1279 struct l2cap_conn *conn = chan->conn;
1280 struct l2cap_le_conn_req req;
1281
1282 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1283 return;
1284
1285 req.psm = chan->psm;
1286 req.scid = cpu_to_le16(chan->scid);
1287 req.mtu = cpu_to_le16(chan->imtu);
1288 req.mps = cpu_to_le16(chan->mps);
1289 req.credits = cpu_to_le16(chan->rx_credits);
1290
1291 chan->ident = l2cap_get_ident(conn);
1292
1293 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1294 sizeof(req), &req);
1295 }
1296
1297 static void l2cap_le_start(struct l2cap_chan *chan)
1298 {
1299 struct l2cap_conn *conn = chan->conn;
1300
1301 if (!smp_conn_security(conn->hcon, chan->sec_level))
1302 return;
1303
1304 if (!chan->psm) {
1305 l2cap_chan_ready(chan);
1306 return;
1307 }
1308
1309 if (chan->state == BT_CONNECT)
1310 l2cap_le_connect(chan);
1311 }
1312
1313 static void l2cap_start_connection(struct l2cap_chan *chan)
1314 {
1315 if (__amp_capable(chan)) {
1316 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1317 a2mp_discover_amp(chan);
1318 } else if (chan->conn->hcon->type == LE_LINK) {
1319 l2cap_le_start(chan);
1320 } else {
1321 l2cap_send_conn_req(chan);
1322 }
1323 }
1324
1325 static void l2cap_request_info(struct l2cap_conn *conn)
1326 {
1327 struct l2cap_info_req req;
1328
1329 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1330 return;
1331
1332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1333
1334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1335 conn->info_ident = l2cap_get_ident(conn);
1336
1337 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1338
1339 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1340 sizeof(req), &req);
1341 }
1342
/* Advance channel setup for @chan.  LE links go straight to the LE
 * flow.  On BR/EDR the remote feature mask must be known first: the
 * info request is issued if not yet sent, and nothing happens until
 * the info exchange is done.  Once security passes and no connect is
 * already pending, the connection is started.
 */
1343 static void l2cap_do_start(struct l2cap_chan *chan)
1344 {
1345 struct l2cap_conn *conn = chan->conn;
1346
1347 if (conn->hcon->type == LE_LINK) {
1348 l2cap_le_start(chan);
1349 return;
1350 }
1351
1352 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1353 l2cap_request_info(conn);
1354 return;
1355 }
1356
1357 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1358 return;
1359
1360 if (l2cap_chan_check_security(chan, true) &&
1361 __l2cap_no_conn_pending(chan))
1362 l2cap_start_connection(chan);
1363 }
1364
1365 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1366 {
1367 u32 local_feat_mask = l2cap_feat_mask;
1368 if (!disable_ertm)
1369 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1370
1371 switch (mode) {
1372 case L2CAP_MODE_ERTM:
1373 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1374 case L2CAP_MODE_STREAMING:
1375 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1376 default:
1377 return 0x00;
1378 }
1379 }
1380
/* Send an L2CAP Disconnection Request for @chan and move it to
 * BT_DISCONN with @err recorded.  ERTM timers are stopped first so no
 * retransmissions race with the teardown.  A2MP channels have no
 * disconnect PDU; they just change state.
 */
1381 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1382 {
1383 struct l2cap_conn *conn = chan->conn;
1384 struct l2cap_disconn_req req;
1385
1386 if (!conn)
1387 return;
1388
1389 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1390 __clear_retrans_timer(chan);
1391 __clear_monitor_timer(chan);
1392 __clear_ack_timer(chan);
1393 }
1394
1395 if (chan->scid == L2CAP_CID_A2MP) {
1396 l2cap_state_change(chan, BT_DISCONN);
1397 return;
1398 }
1399
1400 req.dcid = cpu_to_le16(chan->dcid);
1401 req.scid = cpu_to_le16(chan->scid);
1402 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1403 sizeof(req), &req);
1404
1405 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1406 }
1407
1408 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn (under chan_lock) and push each one
 * forward: connectionless channels become ready, BT_CONNECT channels
 * start their connect sequence once security passes, and BT_CONNECT2
 * channels (incoming, awaiting our response) get a connect response —
 * success, pending-authorization, or pending-authentication — followed
 * by the first configure request on success.
 */
1409 static void l2cap_conn_start(struct l2cap_conn *conn)
1410 {
1411 struct l2cap_chan *chan, *tmp;
1412
1413 BT_DBG("conn %p", conn);
1414
1415 mutex_lock(&conn->chan_lock);
1416
1417 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1418 l2cap_chan_lock(chan);
1419
1420 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1421 l2cap_chan_ready(chan);
1422 l2cap_chan_unlock(chan);
1423 continue;
1424 }
1425
1426 if (chan->state == BT_CONNECT) {
1427 if (!l2cap_chan_check_security(chan, true) ||
1428 !__l2cap_no_conn_pending(chan)) {
1429 l2cap_chan_unlock(chan);
1430 continue;
1431 }
1432
/* The peer does not support the mode this state-2 device insists
 * on, so the channel cannot be established at all.
 */
1433 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1434 && test_bit(CONF_STATE2_DEVICE,
1435 &chan->conf_state)) {
1436 l2cap_chan_close(chan, ECONNRESET);
1437 l2cap_chan_unlock(chan);
1438 continue;
1439 }
1440
1441 l2cap_start_connection(chan);
1442
1443 } else if (chan->state == BT_CONNECT2) {
1444 struct l2cap_conn_rsp rsp;
1445 char buf[128];
1446 rsp.scid = cpu_to_le16(chan->dcid);
1447 rsp.dcid = cpu_to_le16(chan->scid);
1448
1449 if (l2cap_chan_check_security(chan, false)) {
1450 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1451 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1452 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1453 chan->ops->defer(chan);
1454
1455 } else {
1456 l2cap_state_change(chan, BT_CONFIG);
1457 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1458 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1459 }
1460 } else {
1461 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1462 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1463 }
1464
1465 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1466 sizeof(rsp), &rsp);
1467
/* Only a successful, not-yet-configured channel proceeds to the
 * configuration phase.
 */
1468 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1469 rsp.result != L2CAP_CR_SUCCESS) {
1470 l2cap_chan_unlock(chan);
1471 continue;
1472 }
1473
1474 set_bit(CONF_REQ_SENT, &chan->conf_state);
1475 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1476 l2cap_build_conf_req(chan, buf), buf);
1477 chan->num_conf_req++;
1478 }
1479
1480 l2cap_chan_unlock(chan);
1481 }
1482
1483 mutex_unlock(&conn->chan_lock);
1484 }
1485
/* LE-link-specific post-connect work: start pairing for outgoing
 * connections that have no socket, and request a connection parameter
 * update when the negotiated interval falls outside our configured
 * range while we are the slave.
 */
1486 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1487 {
1488 struct hci_conn *hcon = conn->hcon;
1489 struct hci_dev *hdev = hcon->hdev;
1490
1491 BT_DBG("%s conn %p", hdev->name, conn);
1492
1493 /* For outgoing pairing which doesn't necessarily have an
1494 * associated socket (e.g. mgmt_pair_device).
1495 */
1496 if (hcon->out)
1497 smp_conn_security(hcon, hcon->pending_sec_level);
1498
1499 /* For LE slave connections, make sure the connection interval
1500 * is in the range of the minimum and maximum interval that has
1501 * been configured for this connection. If not, then trigger
1502 * the connection update procedure.
1503 */
1504 if (hcon->role == HCI_ROLE_SLAVE &&
1505 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1506 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1507 struct l2cap_conn_param_update_req req;
1508
1509 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1510 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1511 req.latency = cpu_to_le16(hcon->le_conn_latency);
1512 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1513
1514 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1515 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1516 }
1517 }
1518
/* Called when the underlying link comes up.  Requests the remote
 * feature mask on ACL links, then walks all channels (skipping A2MP)
 * to move each one forward for its link type, runs the LE-specific
 * ready work, and finally queues processing of any rx frames that
 * arrived before the connection was ready.
 */
1519 static void l2cap_conn_ready(struct l2cap_conn *conn)
1520 {
1521 struct l2cap_chan *chan;
1522 struct hci_conn *hcon = conn->hcon;
1523
1524 BT_DBG("conn %p", conn);
1525
1526 if (hcon->type == ACL_LINK)
1527 l2cap_request_info(conn);
1528
1529 mutex_lock(&conn->chan_lock);
1530
1531 list_for_each_entry(chan, &conn->chan_l, list) {
1532
1533 l2cap_chan_lock(chan);
1534
1535 if (chan->scid == L2CAP_CID_A2MP) {
1536 l2cap_chan_unlock(chan);
1537 continue;
1538 }
1539
1540 if (hcon->type == LE_LINK) {
1541 l2cap_le_start(chan);
1542 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1543 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1544 l2cap_chan_ready(chan);
1545 } else if (chan->state == BT_CONNECT) {
1546 l2cap_do_start(chan);
1547 }
1548
1549 l2cap_chan_unlock(chan);
1550 }
1551
1552 mutex_unlock(&conn->chan_lock);
1553
1554 if (hcon->type == LE_LINK)
1555 l2cap_le_conn_ready(conn);
1556
1557 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1558 }
1559
1560 /* Notify sockets that we cannot guaranty reliability anymore */
1561 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1562 {
1563 struct l2cap_chan *chan;
1564
1565 BT_DBG("conn %p", conn);
1566
1567 mutex_lock(&conn->chan_lock);
1568
1569 list_for_each_entry(chan, &conn->chan_l, list) {
1570 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1571 l2cap_chan_set_err(chan, err);
1572 }
1573
1574 mutex_unlock(&conn->chan_lock);
1575 }
1576
/* Info-request timer expired without a response from the peer: treat
 * the feature-mask exchange as done (with no remote features learned)
 * and resume channel establishment.
 */
1577 static void l2cap_info_timeout(struct work_struct *work)
1578 {
1579 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1580 info_timer.work);
1581
1582 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1583 conn->info_ident = 0;
1584
1585 l2cap_conn_start(conn);
1586 }
1587
1588 /*
1589 * l2cap_user
1590 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1591 * callback is called during registration. The ->remove callback is called
1592 * during unregistration.
1593 * An l2cap_user object can either be explicitly unregistered or when the
1594 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1595 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1596 * External modules must own a reference to the l2cap_conn object if they intend
1597 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1598 * any time if they don't.
1599 */
1600
/* Register an external @user on @conn.  Returns 0 on success, -EINVAL
 * if the user is already registered, -ENODEV if the connection has
 * been torn down, or the error from the user's ->probe() callback.
 */
1601 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1602 {
1603 struct hci_dev *hdev = conn->hcon->hdev;
1604 int ret;
1605
1606 /* We need to check whether l2cap_conn is registered. If it is not, we
1607 * must not register the l2cap_user. l2cap_conn_del() unregisters
1608 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1609 * relies on the parent hci_conn object to be locked. This itself relies
1610 * on the hci_dev object to be locked. So we must lock the hci device
1611 * here, too. */
1612
1613 hci_dev_lock(hdev);
1614
1615 if (!list_empty(&user->list)) {
1616 ret = -EINVAL;
1617 goto out_unlock;
1618 }
1619
1620 /* conn->hchan is NULL after l2cap_conn_del() was called */
1621 if (!conn->hchan) {
1622 ret = -ENODEV;
1623 goto out_unlock;
1624 }
1625
1626 ret = user->probe(conn, user);
1627 if (ret)
1628 goto out_unlock;
1629
1630 list_add(&user->list, &conn->users);
1631 ret = 0;
1632
1633 out_unlock:
1634 hci_dev_unlock(hdev);
1635 return ret;
1636 }
1637 EXPORT_SYMBOL(l2cap_register_user);
1638
/* Unregister @user from @conn and run its ->remove() callback.
 * A no-op if the user was never registered (or already removed).
 * Locking mirrors l2cap_register_user(): the hci_dev lock serializes
 * against l2cap_conn_del().
 */
1639 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1640 {
1641 struct hci_dev *hdev = conn->hcon->hdev;
1642
1643 hci_dev_lock(hdev);
1644
1645 if (list_empty(&user->list))
1646 goto out_unlock;
1647
1648 list_del_init(&user->list);
1649 user->remove(conn, user);
1650
1651 out_unlock:
1652 hci_dev_unlock(hdev);
1653 }
1654 EXPORT_SYMBOL(l2cap_unregister_user);
1655
1656 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1657 {
1658 struct l2cap_user *user;
1659
1660 while (!list_empty(&conn->users)) {
1661 user = list_first_entry(&conn->users, struct l2cap_user, list);
1662 list_del_init(&user->list);
1663 user->remove(conn, user);
1664 }
1665 }
1666
/* Tear down the L2CAP connection attached to @hcon, delivering @err to
 * every channel.  Cancels pending rx/identity work, removes external
 * users, deletes all channels under chan_lock, releases the HCI
 * channel and finally drops the l2cap_conn reference.  The ordering
 * (unregister users -> kill channels -> hci_chan_del -> put) matters:
 * l2cap_register_user() checks conn->hchan to detect this teardown.
 */
1667 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1668 {
1669 struct l2cap_conn *conn = hcon->l2cap_data;
1670 struct l2cap_chan *chan, *l;
1671
1672 if (!conn)
1673 return;
1674
1675 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1676
1677 kfree_skb(conn->rx_skb);
1678
1679 skb_queue_purge(&conn->pending_rx);
1680
1681 /* We can not call flush_work(&conn->pending_rx_work) here since we
1682 * might block if we are running on a worker from the same workqueue
1683 * pending_rx_work is waiting on.
1684 */
1685 if (work_pending(&conn->pending_rx_work))
1686 cancel_work_sync(&conn->pending_rx_work);
1687
1688 if (work_pending(&conn->id_addr_update_work))
1689 cancel_work_sync(&conn->id_addr_update_work);
1690
1691 l2cap_unregister_all_users(conn);
1692
1693 /* Force the connection to be immediately dropped */
1694 hcon->disc_timeout = 0;
1695
1696 mutex_lock(&conn->chan_lock);
1697
1698 /* Kill channels */
1699 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref across del/close so the ops->close callback cannot be the
 * last reference drop while we still touch the channel.
 */
1700 l2cap_chan_hold(chan);
1701 l2cap_chan_lock(chan);
1702
1703 l2cap_chan_del(chan, err);
1704
1705 l2cap_chan_unlock(chan);
1706
1707 chan->ops->close(chan);
1708 l2cap_chan_put(chan);
1709 }
1710
1711 mutex_unlock(&conn->chan_lock);
1712
1713 hci_chan_del(conn->hchan);
1714
1715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1716 cancel_delayed_work_sync(&conn->info_timer);
1717
1718 hcon->l2cap_data = NULL;
1719 conn->hchan = NULL;
1720 l2cap_conn_put(conn);
1721 }
1722
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free the structure itself.
 */
1723 static void l2cap_conn_free(struct kref *ref)
1724 {
1725 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1726
1727 hci_conn_put(conn->hcon);
1728 kfree(conn);
1729 }
1730
/* Take a reference on @conn and return it, for use by external users
 * that must keep the connection object alive.
 */
1731 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1732 {
1733 kref_get(&conn->ref);
1734 return conn;
1735 }
1736 EXPORT_SYMBOL(l2cap_conn_get);
1737
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
1738 void l2cap_conn_put(struct l2cap_conn *conn)
1739 {
1740 kref_put(&conn->ref, l2cap_conn_free);
1741 }
1742 EXPORT_SYMBOL(l2cap_conn_put);
1743
1744 /* ---- Socket interface ---- */
1745
/* Find a global channel matching @psm and source/destination bdaddr
 * for the given @link_type, preferring an exact address match but
 * falling back to the closest wildcard (BDADDR_ANY) match.  Returns
 * the channel with a reference held (caller must l2cap_chan_put()),
 * or NULL if nothing matches.  @state of 0 matches any channel state.
 */
1749 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1750 bdaddr_t *src,
1751 bdaddr_t *dst,
1752 u8 link_type)
1753 {
1754 struct l2cap_chan *c, *c1 = NULL;
1755
1756 read_lock(&chan_list_lock);
1757
1758 list_for_each_entry(c, &chan_list, global_l) {
1759 if (state && c->state != state)
1760 continue;
1761
/* The channel's source address type must suit the link: BR/EDR
 * sources for ACL links, LE sources for LE links.
 */
1762 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1763 continue;
1764
1765 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1766 continue;
1767
1768 if (c->psm == psm) {
1769 int src_match, dst_match;
1770 int src_any, dst_any;
1771
1772 /* Exact match. */
1773 src_match = !bacmp(&c->src, src);
1774 dst_match = !bacmp(&c->dst, dst);
1775 if (src_match && dst_match) {
1776 l2cap_chan_hold(c);
1777 read_unlock(&chan_list_lock);
1778 return c;
1779 }
1780
1781 /* Closest match */
1782 src_any = !bacmp(&c->src, BDADDR_ANY);
1783 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1784 if ((src_match && dst_any) || (src_any && dst_match) ||
1785 (src_any && dst_any))
1786 c1 = c;
1787 }
1788 }
1789
1790 if (c1)
1791 l2cap_chan_hold(c1);
1792
1793 read_unlock(&chan_list_lock);
1794
1795 return c1;
1796 }
1797
1798 static void l2cap_monitor_timeout(struct work_struct *work)
1799 {
1800 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1801 monitor_timer.work);
1802
1803 BT_DBG("chan %p", chan);
1804
1805 l2cap_chan_lock(chan);
1806
1807 if (!chan->conn) {
1808 l2cap_chan_unlock(chan);
1809 l2cap_chan_put(chan);
1810 return;
1811 }
1812
1813 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1814
1815 l2cap_chan_unlock(chan);
1816 l2cap_chan_put(chan);
1817 }
1818
1819 static void l2cap_retrans_timeout(struct work_struct *work)
1820 {
1821 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1822 retrans_timer.work);
1823
1824 BT_DBG("chan %p", chan);
1825
1826 l2cap_chan_lock(chan);
1827
1828 if (!chan->conn) {
1829 l2cap_chan_unlock(chan);
1830 l2cap_chan_put(chan);
1831 return;
1832 }
1833
1834 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1835 l2cap_chan_unlock(chan);
1836 l2cap_chan_put(chan);
1837 }
1838
/* Transmit all queued Streaming-mode PDUs in @skbs (and any already on
 * tx_q): stamp each I-frame with the next tx sequence number, append
 * the FCS if configured, and send.  Streaming mode never retransmits,
 * so frames leave the queue permanently.  Suppressed while a channel
 * move is in progress.
 */
1839 static void l2cap_streaming_send(struct l2cap_chan *chan,
1840 struct sk_buff_head *skbs)
1841 {
1842 struct sk_buff *skb;
1843 struct l2cap_ctrl *control;
1844
1845 BT_DBG("chan %p, skbs %p", chan, skbs);
1846
1847 if (__chan_is_moving(chan))
1848 return;
1849
1850 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1851
1852 while (!skb_queue_empty(&chan->tx_q)) {
1853
1854 skb = skb_dequeue(&chan->tx_q);
1855
1856 bt_cb(skb)->l2cap.retries = 1;
1857 control = &bt_cb(skb)->l2cap;
1858
1859 control->reqseq = 0;
1860 control->txseq = chan->next_tx_seq;
1861
1862 __pack_control(chan, control, skb);
1863
1864 if (chan->fcs == L2CAP_FCS_CRC16) {
1865 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1866 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1867 }
1868
1869 l2cap_do_send(chan, skb);
1870
1871 BT_DBG("Sent txseq %u", control->txseq);
1872
1873 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1874 chan->frames_sent++;
1875 }
1876 }
1877
/* Transmit as many pending ERTM I-frames as the remote tx window
 * allows, starting at tx_send_head.  Each frame is stamped with
 * txseq/reqseq (piggy-backed ack), gets the F bit if one is owed, has
 * the FCS appended if configured, and is sent as a clone so the
 * original stays queued for retransmission.  Returns the number of
 * frames sent, 0 when blocked (remote busy, moving, window full), or
 * -ENOTCONN if the channel is not connected.
 */
1878 static int l2cap_ertm_send(struct l2cap_chan *chan)
1879 {
1880 struct sk_buff *skb, *tx_skb;
1881 struct l2cap_ctrl *control;
1882 int sent = 0;
1883
1884 BT_DBG("chan %p", chan);
1885
1886 if (chan->state != BT_CONNECTED)
1887 return -ENOTCONN;
1888
1889 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1890 return 0;
1891
1892 if (__chan_is_moving(chan))
1893 return 0;
1894
1895 while (chan->tx_send_head &&
1896 chan->unacked_frames < chan->remote_tx_win &&
1897 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1898
1899 skb = chan->tx_send_head;
1900
1901 bt_cb(skb)->l2cap.retries = 1;
1902 control = &bt_cb(skb)->l2cap;
1903
1904 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1905 control->final = 1;
1906
1907 control->reqseq = chan->buffer_seq;
1908 chan->last_acked_seq = chan->buffer_seq;
1909 control->txseq = chan->next_tx_seq;
1910
1911 __pack_control(chan, control, skb);
1912
1913 if (chan->fcs == L2CAP_FCS_CRC16) {
1914 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1915 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1916 }
1917
1918 /* Clone after data has been modified. Data is assumed to be
1919 read-only (for locking purposes) on cloned sk_buffs.
1920 */
1921 tx_skb = skb_clone(skb, GFP_KERNEL);
1922
1923 if (!tx_skb)
1924 break;
1925
1926 __set_retrans_timer(chan);
1927
1928 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1929 chan->unacked_frames++;
1930 chan->frames_sent++;
1931 sent++;
1932
1933 if (skb_queue_is_last(&chan->tx_q, skb))
1934 chan->tx_send_head = NULL;
1935 else
1936 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1937
1938 l2cap_do_send(chan, tx_skb);
1939 BT_DBG("Sent txseq %u", control->txseq);
1940 }
1941
1942 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1943 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1944
1945 return sent;
1946 }
1947
/* Retransmit every sequence number queued on chan->retrans_list.  Each
 * frame's retry count is bumped (disconnecting the channel if max_tx
 * is exceeded), its control field is rebuilt with the current reqseq
 * and F bit, the FCS is recomputed, and a fresh clone/copy is sent.
 * Suppressed while the remote is busy or a channel move is active.
 */
1948 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1949 {
1950 struct l2cap_ctrl control;
1951 struct sk_buff *skb;
1952 struct sk_buff *tx_skb;
1953 u16 seq;
1954
1955 BT_DBG("chan %p", chan);
1956
1957 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1958 return;
1959
1960 if (__chan_is_moving(chan))
1961 return;
1962
1963 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1964 seq = l2cap_seq_list_pop(&chan->retrans_list);
1965
1966 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1967 if (!skb) {
1968 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1969 seq);
1970 continue;
1971 }
1972
1973 bt_cb(skb)->l2cap.retries++;
1974 control = bt_cb(skb)->l2cap;
1975
1976 if (chan->max_tx != 0 &&
1977 bt_cb(skb)->l2cap.retries > chan->max_tx) {
1978 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1979 l2cap_send_disconn_req(chan, ECONNRESET);
1980 l2cap_seq_list_clear(&chan->retrans_list);
1981 break;
1982 }
1983
1984 control.reqseq = chan->buffer_seq;
1985 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1986 control.final = 1;
1987 else
1988 control.final = 0;
1989
1990 if (skb_cloned(skb)) {
1991 /* Cloned sk_buffs are read-only, so we need a
1992 * writeable copy
1993 */
1994 tx_skb = skb_copy(skb, GFP_KERNEL);
1995 } else {
1996 tx_skb = skb_clone(skb, GFP_KERNEL);
1997 }
1998
1999 if (!tx_skb) {
2000 l2cap_seq_list_clear(&chan->retrans_list);
2001 break;
2002 }
2003
2004 /* Update skb contents */
2005 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2006 put_unaligned_le32(__pack_extended_control(&control),
2007 tx_skb->data + L2CAP_HDR_SIZE);
2008 } else {
2009 put_unaligned_le16(__pack_enhanced_control(&control),
2010 tx_skb->data + L2CAP_HDR_SIZE);
2011 }
2012
2013 /* Update FCS */
2014 if (chan->fcs == L2CAP_FCS_CRC16) {
2015 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2016 tx_skb->len - L2CAP_FCS_SIZE);
2017 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2018 L2CAP_FCS_SIZE);
2019 }
2020
2021 l2cap_do_send(chan, tx_skb);
2022
2023 BT_DBG("Resent txseq %d", control.txseq);
2024
2025 chan->last_acked_seq = chan->buffer_seq;
2026 }
2027 }
2028
/* Retransmit the single I-frame identified by control->reqseq by
 * queueing it on the retransmit list and driving l2cap_ertm_resend().
 */
2029 static void l2cap_retransmit(struct l2cap_chan *chan,
2030 struct l2cap_ctrl *control)
2031 {
2032 BT_DBG("chan %p, control %p", chan, control);
2033
2034 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2035 l2cap_ertm_resend(chan);
2036 }
2037
/* Retransmit every unacked I-frame starting at control->reqseq (up to
 * but not including tx_send_head).  An F bit is owed when the peer
 * polled.  Nothing is queued while the remote reports busy or when no
 * frames are outstanding.
 */
2038 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2039 struct l2cap_ctrl *control)
2040 {
2041 struct sk_buff *skb;
2042
2043 BT_DBG("chan %p, control %p", chan, control);
2044
2045 if (control->poll)
2046 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2047
2048 l2cap_seq_list_clear(&chan->retrans_list);
2049
2050 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2051 return;
2052
2053 if (chan->unacked_frames) {
/* First walk: locate the frame with txseq == reqseq (or stop at
 * tx_send_head if reqseq acked everything outstanding).
 */
2054 skb_queue_walk(&chan->tx_q, skb) {
2055 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2056 skb == chan->tx_send_head)
2057 break;
2058 }
2059
/* Second walk: queue every already-sent frame from there on for
 * retransmission.
 */
2060 skb_queue_walk_from(&chan->tx_q, skb) {
2061 if (skb == chan->tx_send_head)
2062 break;
2063
2064 l2cap_seq_list_append(&chan->retrans_list,
2065 bt_cb(skb)->l2cap.txseq);
2066 }
2067
2068 l2cap_ertm_resend(chan);
2069 }
2070 }
2071
/* Acknowledge received I-frames.  When locally busy in the RECV state
 * an RNR is sent immediately.  Otherwise pending I-frames are flushed
 * first (they carry the ack for free); if un-acked frames remain and
 * the receive window is at least 3/4 consumed an explicit RR goes out,
 * else the ack timer is (re)armed to coalesce future acks.
 */
2072 static void l2cap_send_ack(struct l2cap_chan *chan)
2073 {
2074 struct l2cap_ctrl control;
2075 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2076 chan->last_acked_seq);
2077 int threshold;
2078
2079 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2080 chan, chan->last_acked_seq, chan->buffer_seq);
2081
2082 memset(&control, 0, sizeof(control));
2083 control.sframe = 1;
2084
2085 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2086 chan->rx_state == L2CAP_RX_STATE_RECV) {
2087 __clear_ack_timer(chan);
2088 control.super = L2CAP_SUPER_RNR;
2089 control.reqseq = chan->buffer_seq;
2090 l2cap_send_sframe(chan, &control);
2091 } else {
2092 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2093 l2cap_ertm_send(chan);
2094 /* If any i-frames were sent, they included an ack */
2095 if (chan->buffer_seq == chan->last_acked_seq)
2096 frames_to_ack = 0;
2097 }
2098
2099 /* Ack now if the window is 3/4ths full.
2100 * Calculate without mul or div
2101 */
2102 threshold = chan->ack_win;
2103 threshold += threshold << 1;
2104 threshold >>= 2;
2105
2106 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2107 threshold);
2108
2109 if (frames_to_ack >= threshold) {
2110 __clear_ack_timer(chan);
2111 control.super = L2CAP_SUPER_RR;
2112 control.reqseq = chan->buffer_seq;
2113 l2cap_send_sframe(chan, &control);
2114 frames_to_ack = 0;
2115 }
2116
2117 if (frames_to_ack)
2118 __set_ack_timer(chan);
2119 }
2120 }
2121
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into a chain of
 * MTU-sized continuation fragments on skb's frag_list.  Returns the
 * number of bytes consumed, -EFAULT on a failed copy, or the error
 * from the channel's skb allocator.
 */
2122 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2123 struct msghdr *msg, int len,
2124 int count, struct sk_buff *skb)
2125 {
2126 struct l2cap_conn *conn = chan->conn;
2127 struct sk_buff **frag;
2128 int sent = 0;
2129
2130 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2131 return -EFAULT;
2132
2133 sent += count;
2134 len -= count;
2135
2136 /* Continuation fragments (no L2CAP header) */
2137 frag = &skb_shinfo(skb)->frag_list;
2138 while (len) {
2139 struct sk_buff *tmp;
2140
2141 count = min_t(unsigned int, conn->mtu, len);
2142
2143 tmp = chan->ops->alloc_skb(chan, 0, count,
2144 msg->msg_flags & MSG_DONTWAIT);
2145 if (IS_ERR(tmp))
2146 return PTR_ERR(tmp);
2147
2148 *frag = tmp;
2149
2150 if (!copy_from_iter_full(skb_put(*frag, count), count,
2151 &msg->msg_iter))
2152 return -EFAULT;
2153
2154 sent += count;
2155 len -= count;
2156
/* Account the fragment's bytes on the head skb so skb->len covers
 * the whole chain.
 */
2157 skb->len += (*frag)->len;
2158 skb->data_len += (*frag)->len;
2159
2160 frag = &(*frag)->next;
2161 }
2162
2163 return sent;
2164 }
2165
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then @len bytes of user data from @msg.  Returns the
 * skb or an ERR_PTR on allocation/copy failure.
 */
2166 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2167 struct msghdr *msg, size_t len)
2168 {
2169 struct l2cap_conn *conn = chan->conn;
2170 struct sk_buff *skb;
2171 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2172 struct l2cap_hdr *lh;
2173
2174 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2175 __le16_to_cpu(chan->psm), len);
2176
2177 count = min_t(unsigned int, (conn->mtu - hlen), len);
2178
2179 skb = chan->ops->alloc_skb(chan, hlen, count,
2180 msg->msg_flags & MSG_DONTWAIT);
2181 if (IS_ERR(skb))
2182 return skb;
2183
2184 /* Create L2CAP header */
2185 lh = skb_put(skb, L2CAP_HDR_SIZE);
2186 lh->cid = cpu_to_le16(chan->dcid);
2187 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2188 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2189
2190 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2191 if (unlikely(err < 0)) {
2192 kfree_skb(skb);
2193 return ERR_PTR(err);
2194 }
2195 return skb;
2196 }
2197
2198 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2199 struct msghdr *msg, size_t len)
2200 {
2201 struct l2cap_conn *conn = chan->conn;
2202 struct sk_buff *skb;
2203 int err, count;
2204 struct l2cap_hdr *lh;
2205
2206 BT_DBG("chan %p len %zu", chan, len);
2207
2208 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2209
2210 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2211 msg->msg_flags & MSG_DONTWAIT);
2212 if (IS_ERR(skb))
2213 return skb;
2214
2215 /* Create L2CAP header */
2216 lh = skb_put(skb, L2CAP_HDR_SIZE);
2217 lh->cid = cpu_to_le16(chan->dcid);
2218 lh->len = cpu_to_le16(len);
2219
2220 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2221 if (unlikely(err < 0)) {
2222 kfree_skb(skb);
2223 return ERR_PTR(err);
2224 }
2225 return skb;
2226 }
2227
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, a zero
 * placeholder control field (filled in at transmit time), an optional
 * SDU length field for the first segment of a segmented SDU, then the
 * payload.  Room for the FCS is accounted in the header length but
 * the FCS itself is appended later.  Returns the skb or an ERR_PTR.
 */
2228 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2229 struct msghdr *msg, size_t len,
2230 u16 sdulen)
2231 {
2232 struct l2cap_conn *conn = chan->conn;
2233 struct sk_buff *skb;
2234 int err, count, hlen;
2235 struct l2cap_hdr *lh;
2236
2237 BT_DBG("chan %p len %zu", chan, len);
2238
2239 if (!conn)
2240 return ERR_PTR(-ENOTCONN);
2241
2242 hlen = __ertm_hdr_size(chan);
2243
2244 if (sdulen)
2245 hlen += L2CAP_SDULEN_SIZE;
2246
2247 if (chan->fcs == L2CAP_FCS_CRC16)
2248 hlen += L2CAP_FCS_SIZE;
2249
2250 count = min_t(unsigned int, (conn->mtu - hlen), len);
2251
2252 skb = chan->ops->alloc_skb(chan, hlen, count,
2253 msg->msg_flags & MSG_DONTWAIT);
2254 if (IS_ERR(skb))
2255 return skb;
2256
2257 /* Create L2CAP header */
2258 lh = skb_put(skb, L2CAP_HDR_SIZE);
2259 lh->cid = cpu_to_le16(chan->dcid);
2260 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2261
2262 /* Control header is populated later */
2263 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2264 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2265 else
2266 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2267
2268 if (sdulen)
2269 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2270
2271 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2272 if (unlikely(err < 0)) {
2273 kfree_skb(skb);
2274 return ERR_PTR(err);
2275 }
2276
2277 bt_cb(skb)->l2cap.fcs = chan->fcs;
2278 bt_cb(skb)->l2cap.retries = 0;
2279 return skb;
2280 }
2281
/* Split an SDU of @len bytes from @msg into ERTM/Streaming I-frame
 * PDUs appended to @seg_queue.  The PDU size is constrained by the
 * HCI MTU (every ERTM PDU must fit one HCI fragment), the BR/EDR
 * maximum when no high-speed link exists, ERTM header/FCS overhead,
 * and the remote MPS.  SAR markers are set per segment; only the
 * first carries the SDU length.  Returns 0 or a negative errno (the
 * queue is purged on failure).
 */
2282 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2283 struct sk_buff_head *seg_queue,
2284 struct msghdr *msg, size_t len)
2285 {
2286 struct sk_buff *skb;
2287 u16 sdu_len;
2288 size_t pdu_len;
2289 u8 sar;
2290
2291 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2292
2293 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2294 * so fragmented skbs are not used. The HCI layer's handling
2295 * of fragmented skbs is not compatible with ERTM's queueing.
2296 */
2297
2298 /* PDU size is derived from the HCI MTU */
2299 pdu_len = chan->conn->mtu;
2300
2301 /* Constrain PDU size for BR/EDR connections */
2302 if (!chan->hs_hcon)
2303 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2304
2305 /* Adjust for largest possible L2CAP overhead. */
2306 if (chan->fcs)
2307 pdu_len -= L2CAP_FCS_SIZE;
2308
2309 pdu_len -= __ertm_hdr_size(chan);
2310
2311 /* Remote device may have requested smaller PDUs */
2312 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2313
2314 if (len <= pdu_len) {
2315 sar = L2CAP_SAR_UNSEGMENTED;
2316 sdu_len = 0;
2317 pdu_len = len;
2318 } else {
2319 sar = L2CAP_SAR_START;
2320 sdu_len = len;
2321 }
2322
2323 while (len > 0) {
2324 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2325
2326 if (IS_ERR(skb)) {
2327 __skb_queue_purge(seg_queue);
2328 return PTR_ERR(skb);
2329 }
2330
2331 bt_cb(skb)->l2cap.sar = sar;
2332 __skb_queue_tail(seg_queue, skb);
2333
2334 len -= pdu_len;
/* SDU length is only carried by the START segment */
2335 if (sdu_len)
2336 sdu_len = 0;
2337
2338 if (len <= pdu_len) {
2339 sar = L2CAP_SAR_END;
2340 pdu_len = len;
2341 } else {
2342 sar = L2CAP_SAR_CONTINUE;
2343 }
2344 }
2345
2346 return 0;
2347 }
2348
/* Build an LE flow-control (K-frame) PDU: basic L2CAP header, an
 * optional 2-byte SDU length on the first segment, then @len bytes of
 * user data from @msg.  Returns the skb or an ERR_PTR on failure.
 */
2349 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2350 struct msghdr *msg,
2351 size_t len, u16 sdulen)
2352 {
2353 struct l2cap_conn *conn = chan->conn;
2354 struct sk_buff *skb;
2355 int err, count, hlen;
2356 struct l2cap_hdr *lh;
2357
2358 BT_DBG("chan %p len %zu", chan, len);
2359
2360 if (!conn)
2361 return ERR_PTR(-ENOTCONN);
2362
2363 hlen = L2CAP_HDR_SIZE;
2364
2365 if (sdulen)
2366 hlen += L2CAP_SDULEN_SIZE;
2367
2368 count = min_t(unsigned int, (conn->mtu - hlen), len);
2369
2370 skb = chan->ops->alloc_skb(chan, hlen, count,
2371 msg->msg_flags & MSG_DONTWAIT);
2372 if (IS_ERR(skb))
2373 return skb;
2374
2375 /* Create L2CAP header */
2376 lh = skb_put(skb, L2CAP_HDR_SIZE);
2377 lh->cid = cpu_to_le16(chan->dcid);
2378 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2379
2380 if (sdulen)
2381 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2382
2383 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2384 if (unlikely(err < 0)) {
2385 kfree_skb(skb);
2386 return ERR_PTR(err);
2387 }
2388
2389 return skb;
2390 }
2391
/* Segment an SDU from @msg into LE credit-based flow control PDUs and
 * queue them on @seg_queue.  The first PDU carries the total SDU length;
 * follow-up PDUs do not, so they can carry L2CAP_SDULEN_SIZE more payload.
 *
 * Returns 0 on success or a negative errno; on failure @seg_queue is
 * purged of any PDUs already built.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU loses 2 bytes of payload to the SDU-length field. */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* After the first PDU: no SDU-length field, so the
			 * remaining PDUs may carry the full remote MPS.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2427
2428 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2429 {
2430 int sent = 0;
2431
2432 BT_DBG("chan %p", chan);
2433
2434 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2435 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2436 chan->tx_credits--;
2437 sent++;
2438 }
2439
2440 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2441 skb_queue_len(&chan->tx_q));
2442 }
2443
/* Send one SDU of @len bytes from @msg on @chan, dispatching on the
 * channel type and mode (connectionless, LE flow control, basic,
 * ERTM/streaming).
 *
 * Returns the number of bytes accepted (== @len) or a negative errno
 * (-ENOTCONN, -EMSGSIZE, -EBADFD, or an allocation error).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmentation may block for memory; the channel could
		 * have been closed meanwhile.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop feeding us. */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2569 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2570
/* Send an SREJ S-frame for every missing sequence number from the
 * currently expected TX seq up to (but excluding) @txseq, skipping
 * frames already buffered in the SREJ queue.  Each requested seq is
 * also appended to the SREJ list for bookkeeping.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames we have not already received. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2593
2594 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2595 {
2596 struct l2cap_ctrl control;
2597
2598 BT_DBG("chan %p", chan);
2599
2600 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2601 return;
2602
2603 memset(&control, 0, sizeof(control));
2604 control.sframe = 1;
2605 control.super = L2CAP_SUPER_SREJ;
2606 control.reqseq = chan->srej_list.tail;
2607 l2cap_send_sframe(chan, &control);
2608 }
2609
/* Re-send SREJ S-frames for every outstanding entry on the SREJ list,
 * stopping if @txseq is reached or the list is exhausted.  Entries are
 * popped and re-appended, so the initial head is captured to guarantee
 * a single pass even though the list is being rotated.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Rotate the entry back: it is still outstanding. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2635
/* Process an acknowledgment up to @reqseq: free every TX-queued frame
 * with a sequence number before @reqseq and advance expected_ack_seq.
 * Clears the retransmission timer once nothing is left unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2667
2668 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2669 {
2670 BT_DBG("chan %p", chan);
2671
2672 chan->expected_tx_seq = chan->buffer_seq;
2673 l2cap_seq_list_clear(&chan->srej_list);
2674 skb_queue_purge(&chan->srej_q);
2675 chan->rx_state = L2CAP_RX_STATE_RECV;
2676 }
2677
/* ERTM TX state machine handler for the XMIT state.  Queues and sends
 * new data, handles local-busy transitions, processes received acks and
 * moves to WAIT_F when a poll is sent.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We told the peer we were busy (RNR); poll it with
			 * an RR so it resumes, then wait for the F-bit.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2749
/* ERTM TX state machine handler for the WAIT_F state (a poll was sent
 * and we are waiting for a frame with the F-bit set).  New data is only
 * queued, not sent; receiving the F-bit returns the channel to XMIT.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			/* Poll the peer with RR so it resumes after our RNR. */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means retry forever. */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2827
2828 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2829 struct sk_buff_head *skbs, u8 event)
2830 {
2831 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2832 chan, control, skbs, event, chan->tx_state);
2833
2834 switch (chan->tx_state) {
2835 case L2CAP_TX_STATE_XMIT:
2836 l2cap_tx_state_xmit(chan, control, skbs, event);
2837 break;
2838 case L2CAP_TX_STATE_WAIT_F:
2839 l2cap_tx_state_wait_f(chan, control, skbs, event);
2840 break;
2841 default:
2842 /* Ignore event */
2843 break;
2844 }
2845 }
2846
/* Feed a received reqseq/F-bit acknowledgment into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2853
/* Feed a received F-bit (final) indication into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2860
2861 /* Copy frame to all raw sockets on that connection */
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Clone per channel; on allocation failure just skip
		 * this channel rather than failing the whole delivery.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() consumes the skb on success; free it ourselves
		 * only when the channel refused it.
		 */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2888
2889 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header + @dlen
 * bytes of @data.  Payload that does not fit in the connection MTU is
 * chained as header-less continuation fragments on skb frag_list.
 *
 * Returns the skb or NULL on allocation failure / MTU too small.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* MTU must at least hold the two headers. */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained. */
	kfree_skb(skb);
	return NULL;
}
2955
/* Decode one configuration option at *ptr, returning its type, payload
 * length and value, and advance *ptr past it.  Values of 1/2/4 bytes are
 * returned inline; any other length is returned as a pointer to the raw
 * payload.  Returns the total bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote peer and is not validated
 * here; callers are responsible for bounding the walk with their own
 * remaining-length accounting and for checking *olen against the size
 * expected for each option type before using *val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Larger/odd sizes: hand back a pointer to the payload. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2989
/* Append one configuration option (type, len, value) at *ptr and advance
 * *ptr past it.  Values of 1/2/4 bytes are encoded little-endian; any
 * other length treats @val as a pointer to @len raw bytes.
 *
 * NOTE(review): this writes into the caller's buffer without a bound
 * check; every caller must guarantee enough remaining space (mainline
 * later added an explicit output-size parameter for this reason —
 * CVE-2017-1000251 class issue). Changing the signature here would break
 * callers outside this view, so only flagging it.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3019
3020 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3021 {
3022 struct l2cap_conf_efs efs;
3023
3024 switch (chan->mode) {
3025 case L2CAP_MODE_ERTM:
3026 efs.id = chan->local_id;
3027 efs.stype = chan->local_stype;
3028 efs.msdu = cpu_to_le16(chan->local_msdu);
3029 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3030 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3031 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3032 break;
3033
3034 case L2CAP_MODE_STREAMING:
3035 efs.id = 1;
3036 efs.stype = L2CAP_SERV_BESTEFFORT;
3037 efs.msdu = cpu_to_le16(chan->local_msdu);
3038 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3039 efs.acc_lat = 0;
3040 efs.flush_to = 0;
3041 break;
3042
3043 default:
3044 return;
3045 }
3046
3047 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3048 (unsigned long) &efs);
3049 }
3050
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR to ack them.  Drops the
 * channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received but not yet acknowledged to the peer. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balances the hold taken when scheduling this work. */
	l2cap_chan_put(chan);
}
3070
/* Reset per-channel sequencing/queue state for a newly configured
 * channel.  For ERTM mode additionally set up the RX/TX state machines,
 * retransmission/monitor/ack timers, the SREJ queue and the sequence
 * lists.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated (in which case any list already set up is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on BR/EDR. */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Roll back the first list so we don't leak it. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3115
3116 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3117 {
3118 switch (mode) {
3119 case L2CAP_MODE_STREAMING:
3120 case L2CAP_MODE_ERTM:
3121 if (l2cap_mode_supported(mode, remote_feat_mask))
3122 return mode;
3123 /* fall through */
3124 default:
3125 return L2CAP_MODE_BASIC;
3126 }
3127 }
3128
3129 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3130 {
3131 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3132 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3133 }
3134
3135 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3136 {
3137 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3138 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3139 }
3140
/* Fill in the retransmission and monitor timeouts in @rfc.  On an AMP
 * link they are derived from the controller's best-effort flush timeout;
 * on BR/EDR the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		/* Flush timeout is in microseconds here. */
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field in the RFC option. */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3178
3179 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3180 {
3181 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3182 __l2cap_ews_supported(chan->conn)) {
3183 /* use extended control field */
3184 set_bit(FLAG_EXT_CTRL, &chan->flags);
3185 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3186 } else {
3187 chan->tx_win = min_t(u16, chan->tx_win,
3188 L2CAP_DEFAULT_TX_WINDOW);
3189 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3190 }
3191 chan->ack_win = chan->tx_win;
3192 }
3193
/* Build a Configure Request for @chan in @data: pick the channel mode
 * (downgrading if the peer lacks ERTM/streaming support), then emit the
 * MTU, RFC, EFS, EWS and FCS options as applicable.
 *
 * Returns the number of bytes written into @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode was already negotiated on an earlier request/response. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their configured mode as-is. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Explicit basic-mode RFC only if the peer knows ERTM or
		 * streaming at all; otherwise it is implied.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is capped so a worst-case frame (extended
		 * header + SDU length + FCS) still fits the HCI MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Full window only goes in the EWS option. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3314
/* Parse the peer's Configure Request (stored in chan->conf_req) and
 * build our Configure Response in @data.
 *
 * Fix vs. original: each option's payload length (@olen) is now checked
 * against the size expected for its type before the value is used, and
 * remote_efs is only set after a size-validated copy.  Previously a
 * malformed option (e.g. a short EFS) could leave remote_efs set while
 * the efs struct still held uninitialized stack data, which was later
 * copied into channel state and echoed back to the peer.  Options with
 * a wrong length are simply ignored.
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the peer's proposal cannot be accepted at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			/* Only mark EFS as received once the payload has
			 * actually been copied; efs must never be read
			 * uninitialized below.
			 */
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Unknown non-hint options are echoed back with
			 * a CONF_UNKNOWN result.
			 */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode was already rejected once; give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our MTU. */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3528
3529 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3530 void *data, u16 *result)
3531 {
3532 struct l2cap_conf_req *req = data;
3533 void *ptr = req->data;
3534 int type, olen;
3535 unsigned long val;
3536 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3537 struct l2cap_conf_efs efs;
3538
3539 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3540
3541 while (len >= L2CAP_CONF_OPT_SIZE) {
3542 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3543
3544 switch (type) {
3545 case L2CAP_CONF_MTU:
3546 if (val < L2CAP_DEFAULT_MIN_MTU) {
3547 *result = L2CAP_CONF_UNACCEPT;
3548 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3549 } else
3550 chan->imtu = val;
3551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3552 break;
3553
3554 case L2CAP_CONF_FLUSH_TO:
3555 chan->flush_to = val;
3556 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3557 2, chan->flush_to);
3558 break;
3559
3560 case L2CAP_CONF_RFC:
3561 if (olen == sizeof(rfc))
3562 memcpy(&rfc, (void *)val, olen);
3563
3564 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3565 rfc.mode != chan->mode)
3566 return -ECONNREFUSED;
3567
3568 chan->fcs = 0;
3569
3570 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3571 sizeof(rfc), (unsigned long) &rfc);
3572 break;
3573
3574 case L2CAP_CONF_EWS:
3575 chan->ack_win = min_t(u16, val, chan->ack_win);
3576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3577 chan->tx_win);
3578 break;
3579
3580 case L2CAP_CONF_EFS:
3581 if (olen == sizeof(efs))
3582 memcpy(&efs, (void *)val, olen);
3583
3584 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3585 efs.stype != L2CAP_SERV_NOTRAFIC &&
3586 efs.stype != chan->local_stype)
3587 return -ECONNREFUSED;
3588
3589 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3590 (unsigned long) &efs);
3591 break;
3592
3593 case L2CAP_CONF_FCS:
3594 if (*result == L2CAP_CONF_PENDING)
3595 if (val == L2CAP_FCS_NONE)
3596 set_bit(CONF_RECV_NO_FCS,
3597 &chan->conf_state);
3598 break;
3599 }
3600 }
3601
3602 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3603 return -ECONNREFUSED;
3604
3605 chan->mode = rfc.mode;
3606
3607 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3608 switch (rfc.mode) {
3609 case L2CAP_MODE_ERTM:
3610 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3611 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3612 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3613 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3614 chan->ack_win = min_t(u16, chan->ack_win,
3615 rfc.txwin_size);
3616
3617 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3618 chan->local_msdu = le16_to_cpu(efs.msdu);
3619 chan->local_sdu_itime =
3620 le32_to_cpu(efs.sdu_itime);
3621 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3622 chan->local_flush_to =
3623 le32_to_cpu(efs.flush_to);
3624 }
3625 break;
3626
3627 case L2CAP_MODE_STREAMING:
3628 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3629 }
3630 }
3631
3632 req->dcid = cpu_to_le16(chan->dcid);
3633 req->flags = cpu_to_le16(0);
3634
3635 return ptr - data;
3636 }
3637
3638 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3639 u16 result, u16 flags)
3640 {
3641 struct l2cap_conf_rsp *rsp = data;
3642 void *ptr = rsp->data;
3643
3644 BT_DBG("chan %p", chan);
3645
3646 rsp->scid = cpu_to_le16(chan->dcid);
3647 rsp->result = cpu_to_le16(result);
3648 rsp->flags = cpu_to_le16(flags);
3649
3650 return ptr - data;
3651 }
3652
3653 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3654 {
3655 struct l2cap_le_conn_rsp rsp;
3656 struct l2cap_conn *conn = chan->conn;
3657
3658 BT_DBG("chan %p", chan);
3659
3660 rsp.dcid = cpu_to_le16(chan->scid);
3661 rsp.mtu = cpu_to_le16(chan->imtu);
3662 rsp.mps = cpu_to_le16(chan->mps);
3663 rsp.credits = cpu_to_le16(chan->rx_credits);
3664 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3665
3666 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3667 &rsp);
3668 }
3669
3670 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3671 {
3672 struct l2cap_conn_rsp rsp;
3673 struct l2cap_conn *conn = chan->conn;
3674 u8 buf[128];
3675 u8 rsp_code;
3676
3677 rsp.scid = cpu_to_le16(chan->dcid);
3678 rsp.dcid = cpu_to_le16(chan->scid);
3679 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3680 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3681
3682 if (chan->hs_hcon)
3683 rsp_code = L2CAP_CREATE_CHAN_RSP;
3684 else
3685 rsp_code = L2CAP_CONN_RSP;
3686
3687 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3688
3689 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3690
3691 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3692 return;
3693
3694 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3695 l2cap_build_conf_req(chan, buf), buf);
3696 chan->num_conf_req++;
3697 }
3698
3699 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3700 {
3701 int type, olen;
3702 unsigned long val;
3703 /* Use sane default values in case a misbehaving remote device
3704 * did not send an RFC or extended window size option.
3705 */
3706 u16 txwin_ext = chan->ack_win;
3707 struct l2cap_conf_rfc rfc = {
3708 .mode = chan->mode,
3709 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3710 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3711 .max_pdu_size = cpu_to_le16(chan->imtu),
3712 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3713 };
3714
3715 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3716
3717 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3718 return;
3719
3720 while (len >= L2CAP_CONF_OPT_SIZE) {
3721 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3722
3723 switch (type) {
3724 case L2CAP_CONF_RFC:
3725 if (olen == sizeof(rfc))
3726 memcpy(&rfc, (void *)val, olen);
3727 break;
3728 case L2CAP_CONF_EWS:
3729 txwin_ext = val;
3730 break;
3731 }
3732 }
3733
3734 switch (rfc.mode) {
3735 case L2CAP_MODE_ERTM:
3736 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3737 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3738 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3739 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3740 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3741 else
3742 chan->ack_win = min_t(u16, chan->ack_win,
3743 rfc.txwin_size);
3744 break;
3745 case L2CAP_MODE_STREAMING:
3746 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3747 }
3748 }
3749
3750 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3751 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3752 u8 *data)
3753 {
3754 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3755
3756 if (cmd_len < sizeof(*rej))
3757 return -EPROTO;
3758
3759 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3760 return 0;
3761
3762 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3763 cmd->ident == conn->info_ident) {
3764 cancel_delayed_work(&conn->info_timer);
3765
3766 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3767 conn->info_ident = 0;
3768
3769 l2cap_conn_start(conn);
3770 }
3771
3772 return 0;
3773 }
3774
/* Handle an incoming Connection Request (or, when amp_id != 0, the
 * channel-creation half of an AMP Create Channel Request).  Looks up a
 * listening channel for the requested PSM, runs link-security checks,
 * instantiates the new channel, sends the response using @rsp_code and
 * possibly kicks off the information/configuration exchange.
 *
 * Returns the newly created channel, or NULL when the request was
 * rejected (the reject response has already been sent in that case).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock, then the parent channel. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	/* The remote's source CID becomes our destination CID. */
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize: defer the final
				 * response and report authorization pending.
				 */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Link not yet secure enough: wait for encryption. */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Remote feature mask still unknown: answer pending and
		 * query it below.
		 */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	/* Drop the reference taken by l2cap_global_chan_by_psm(). */
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Ask the remote for its feature mask before completing. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		/* Connection accepted immediately: start configuration. */
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3909
3910 static int l2cap_connect_req(struct l2cap_conn *conn,
3911 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3912 {
3913 struct hci_dev *hdev = conn->hcon->hdev;
3914 struct hci_conn *hcon = conn->hcon;
3915
3916 if (cmd_len < sizeof(struct l2cap_conn_req))
3917 return -EPROTO;
3918
3919 hci_dev_lock(hdev);
3920 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3921 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3922 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3923 hci_dev_unlock(hdev);
3924
3925 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3926 return 0;
3927 }
3928
/* Handle a Connection Response or Create Channel Response from the
 * remote.  Locates our channel (by the scid echoed back, or by the
 * command ident for a pure reject), then advances or tears down the
 * channel according to the result code.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		/* Normal case: the peer echoed our source CID back. */
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* Rejects may omit the CID; match on the request ident. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only one Configure Request is ever initiated per channel. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Remote needs more time (authorization/authentication). */
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a rejection: tear the channel down. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4001
4002 static inline void set_default_fcs(struct l2cap_chan *chan)
4003 {
4004 /* FCS is enabled only in ERTM or streaming mode, if one or both
4005 * sides request it.
4006 */
4007 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4008 chan->fcs = L2CAP_FCS_NONE;
4009 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4010 chan->fcs = L2CAP_FCS_CRC16;
4011 }
4012
4013 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4014 u8 ident, u16 flags)
4015 {
4016 struct l2cap_conn *conn = chan->conn;
4017
4018 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4019 flags);
4020
4021 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4022 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4023
4024 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4025 l2cap_build_conf_rsp(chan, data,
4026 L2CAP_CONF_SUCCESS, flags), data);
4027 }
4028
4029 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4030 u16 scid, u16 dcid)
4031 {
4032 struct l2cap_cmd_rej_cid rej;
4033
4034 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4035 rej.scid = __cpu_to_le16(scid);
4036 rej.dcid = __cpu_to_le16(dcid);
4037
4038 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4039 }
4040
/* Handle an incoming Configure Request.  Requests may be split across
 * several PDUs (the CONTINUATION flag); fragments accumulate in
 * chan->conf_req until the final one arrives, at which point the whole
 * option set is parsed and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2. */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unrecoverable option conflict: drop the channel. */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize FCS/ERTM and go
		 * operational.
		 */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4149
/* Handle an incoming Configure Response.  Depending on the result the
 * channel either accepts the settings, retries with adjusted options
 * (UNACCEPT), waits (PENDING), or is disconnected.
 *
 * NOTE(review): both the PENDING and UNACCEPT paths pass fixed 64-byte
 * stack buffers to l2cap_parse_conf_rsp(), which has no output-size
 * bound; a peer repeating options in one response could overflow them
 * (cf. CVE-2017-1000251) — TODO confirm against the upstream fix that
 * threads a buffer size through l2cap_parse_conf_rsp.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Apply the final RFC/EWS settings the remote confirmed. */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: finish after the logical
				 * link comes up (see l2cap_logical_cfm).
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Retry with the remote's counter-proposal, bounded by
		 * L2CAP_CONF_MAX_CONF_RSP attempts.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fallthrough: retry budget exhausted, treat as failure */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: channel becomes operational. */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4261
/* Handle an incoming Disconnection Request: acknowledge it, then tear
 * the channel down.  An extra reference is held across the teardown so
 * the channel stays valid for ops->close() after l2cap_chan_del() has
 * dropped it from the connection list.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid names our local (source) CID. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Keep the channel alive until ops->close() below. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4308
/* Handle a Disconnection Response: the remote confirmed our disconnect
 * request, so complete the local teardown.  As in l2cap_disconnect_req,
 * an extra reference keeps the channel valid for ops->close() after it
 * is removed from the connection list.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		/* Channel already gone (e.g. timed out locally): nothing
		 * left to do.
		 */
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4347
4348 static inline int l2cap_information_req(struct l2cap_conn *conn,
4349 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4350 u8 *data)
4351 {
4352 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4353 u16 type;
4354
4355 if (cmd_len != sizeof(*req))
4356 return -EPROTO;
4357
4358 type = __le16_to_cpu(req->type);
4359
4360 BT_DBG("type 0x%4.4x", type);
4361
4362 if (type == L2CAP_IT_FEAT_MASK) {
4363 u8 buf[8];
4364 u32 feat_mask = l2cap_feat_mask;
4365 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4366 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4367 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4368 if (!disable_ertm)
4369 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4370 | L2CAP_FEAT_FCS;
4371 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4372 feat_mask |= L2CAP_FEAT_EXT_FLOW
4373 | L2CAP_FEAT_EXT_WINDOW;
4374
4375 put_unaligned_le32(feat_mask, rsp->data);
4376 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4377 buf);
4378 } else if (type == L2CAP_IT_FIXED_CHAN) {
4379 u8 buf[12];
4380 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4381
4382 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4383 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4384 rsp->data[0] = conn->local_fixed_chan;
4385 memset(rsp->data + 1, 0, 7);
4386 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4387 buf);
4388 } else {
4389 struct l2cap_info_rsp rsp;
4390 rsp.type = cpu_to_le16(type);
4391 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4392 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4393 &rsp);
4394 }
4395
4396 return 0;
4397 }
4398
/* Handle an Information Response from the remote.  Stores the reported
 * feature mask / fixed-channel bitmap, possibly issues the follow-up
 * fixed-channel query, and starts pending channels once information
 * gathering is complete.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused: proceed without extended features. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Remote supports fixed channels: query its
			 * fixed-channel bitmap before starting channels.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4461
/* Handle an AMP Create Channel Request.  For controller id 0 this is
 * equivalent to a plain BR/EDR connection request; otherwise the AMP
 * controller id is validated and the new channel is bound to the AMP
 * physical link.  Invalid controller ids are answered with
 * L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only valid if we advertise A2MP support. */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link must already exist toward this
		 * peer; otherwise reject with an invalid-CID response.
		 */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links carry their own CRC; disable L2CAP FCS. */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4538
4539 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4540 {
4541 struct l2cap_move_chan_req req;
4542 u8 ident;
4543
4544 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4545
4546 ident = l2cap_get_ident(chan->conn);
4547 chan->ident = ident;
4548
4549 req.icid = cpu_to_le16(chan->scid);
4550 req.dest_amp_id = dest_amp_id;
4551
4552 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4553 &req);
4554
4555 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4556 }
4557
4558 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4559 {
4560 struct l2cap_move_chan_rsp rsp;
4561
4562 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4563
4564 rsp.icid = cpu_to_le16(chan->dcid);
4565 rsp.result = cpu_to_le16(result);
4566
4567 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4568 sizeof(rsp), &rsp);
4569 }
4570
4571 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4572 {
4573 struct l2cap_move_chan_cfm cfm;
4574
4575 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4576
4577 chan->ident = l2cap_get_ident(chan->conn);
4578
4579 cfm.icid = cpu_to_le16(chan->scid);
4580 cfm.result = cpu_to_le16(result);
4581
4582 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4583 sizeof(cfm), &cfm);
4584
4585 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4586 }
4587
4588 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4589 {
4590 struct l2cap_move_chan_cfm cfm;
4591
4592 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4593
4594 cfm.icid = cpu_to_le16(icid);
4595 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4596
4597 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4598 sizeof(cfm), &cfm);
4599 }
4600
4601 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4602 u16 icid)
4603 {
4604 struct l2cap_move_chan_cfm_rsp rsp;
4605
4606 BT_DBG("icid 0x%4.4x", icid);
4607
4608 rsp.icid = cpu_to_le16(icid);
4609 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4610 }
4611
/* Detach the channel from its AMP logical link.
 * NOTE(review): only the channel's pointers are cleared here; actual
 * link teardown is still a placeholder.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4619
/* React to a failed AMP logical-link setup.  For a channel that never
 * reached BT_CONNECTED the whole channel is torn down; for an
 * established channel only the in-progress move is aborted, according
 * to our role in the move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the channel. */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4650
/* Complete an AMP channel creation once its logical link is up: attach
 * the hci_chan, send the deferred (EFS) Configure Response, and bring
 * the channel up if configuration already finished in both directions.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	/* Scratch buffer for the response built by
	 * l2cap_send_efs_conf_rsp(); its contents are filled there.
	 */
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident still holds the config request being answered. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4673
/* Advance the channel-move state machine after the new logical link
 * came up.  Which transition applies depends on whether we are still
 * waiting for the peer's move response or its confirmation, and on our
 * role in the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Receiver is busy: hold off until it drains. */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4707
/* Call with chan locked */
/* Logical-link completion callback from the AMP layer.  On failure the
 * channel or move is aborted; on success the link is handed to either
 * the channel-creation path (channel not yet connected) or the move
 * path (established channel being moved).
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4728
4729 void l2cap_move_start(struct l2cap_chan *chan)
4730 {
4731 BT_DBG("chan %p", chan);
4732
4733 if (chan->local_amp_id == AMP_ID_BREDR) {
4734 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4735 return;
4736 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4737 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4738 /* Placeholder - start physical link setup */
4739 } else {
4740 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4741 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4742 chan->move_id = 0;
4743 l2cap_move_setup(chan);
4744 l2cap_send_move_chan_req(chan, 0);
4745 }
4746 }
4747
/* Continue channel creation on an AMP after the physical link attempt
 * has completed.
 *
 * Outgoing (BT_CONNECT): on success send Create Channel Request on the
 * AMP, otherwise fall back to a plain BR/EDR Connect Request.
 *
 * Incoming: send the Create Channel Response that was pending on the
 * physical link, and on success start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration right away */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4799
4800 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4801 u8 remote_amp_id)
4802 {
4803 l2cap_move_setup(chan);
4804 chan->move_id = local_amp_id;
4805 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4806
4807 l2cap_send_move_chan_req(chan, remote_amp_id);
4808 }
4809
/* As move responder, answer a Move Channel Request once the physical
 * link outcome is known.  If the logical link is already connected the
 * move can be confirmed immediately; if it is still coming up, wait for
 * the logical link confirmation; with no logical link, refuse the move.
 *
 * NOTE(review): hchan is always NULL here until the placeholder lookup
 * is implemented, so only the refusal path can currently run.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4834
4835 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4836 {
4837 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4838 u8 rsp_result;
4839 if (result == -EINVAL)
4840 rsp_result = L2CAP_MR_BAD_ID;
4841 else
4842 rsp_result = L2CAP_MR_NOT_ALLOWED;
4843
4844 l2cap_send_move_chan_rsp(chan, rsp_result);
4845 }
4846
4847 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4848 chan->move_state = L2CAP_MOVE_STABLE;
4849
4850 /* Restart data transmission */
4851 l2cap_ertm_send(chan);
4852 }
4853
/* Physical link confirmation: continue channel creation or a channel
 * move once the AMP physical link attempt has completed.
 *
 * Invoke with locked chan.
 *
 * NOTE(review): the early-return path for BT_DISCONN/BT_CLOSED unlocks
 * the channel itself, while every other path returns with it still
 * locked -- confirm callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going down: nothing left to create or move */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Not yet connected: this is channel creation */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4887
/* Handle an incoming Move Channel Request.  Validates that the channel
 * exists, may be moved (dynamic CID, ERTM/streaming mode, policy allows
 * it), that the destination controller id is usable, and resolves move
 * collisions by bd_addr comparison before becoming move responder.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; all exit paths
 * after that lookup go through the final unlock.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves are only meaningful when A2MP is supported locally */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: refuse without holding any lock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination must be an AMP controller that is present and up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4985
/* Continue an in-progress move after a success or pending Move Channel
 * Response arrived.  Advances the move state machine depending on what
 * is still outstanding (remote response, logical link, local busy).
 *
 * l2cap_get_chan_by_scid() returns the channel locked; unlocked at the
 * end.  An unknown icid still gets a Move Confirm per the spec.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Re-arm the move timeout while the remote reports "pending" */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5075
/* Handle a failed Move Channel Response.  A collision result keeps the
 * move alive with this side demoted to responder; any other failure
 * cancels the move.  Either way an (unconfirmed) Move Confirm is sent.
 *
 * l2cap_get_chan_by_ident() returns the channel locked.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: let the remote drive it */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5104
5105 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5106 struct l2cap_cmd_hdr *cmd,
5107 u16 cmd_len, void *data)
5108 {
5109 struct l2cap_move_chan_rsp *rsp = data;
5110 u16 icid, result;
5111
5112 if (cmd_len != sizeof(*rsp))
5113 return -EPROTO;
5114
5115 icid = le16_to_cpu(rsp->icid);
5116 result = le16_to_cpu(rsp->result);
5117
5118 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5119
5120 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5121 l2cap_move_continue(conn, icid, result);
5122 else
5123 l2cap_move_fail(conn, cmd->ident, icid, result);
5124
5125 return 0;
5126 }
5127
/* Handle a Move Channel Confirm.  On a confirmed move, switch the
 * channel to the new controller (dropping the logical link when the
 * destination is BR/EDR); an unconfirmed result reverts the move.
 * A Confirm Response is always sent, even for unknown icids.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move aborted: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5169
/* Handle a Move Channel Confirm Response, the final message of a move.
 * If we were waiting for it, commit the controller switch and release
 * the logical link when the channel ended up back on BR/EDR.
 *
 * l2cap_get_chan_by_scid() returns the channel locked.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5204
/* Handle an LE Connection Parameter Update Request.  Only valid when we
 * are the LE master; parameters are validated, a response is always
 * sent, and accepted parameters are pushed down to the controller and
 * reported to mgmt.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may be asked to update connection parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the new parameters and let mgmt decide whether
		 * they should be stored for future connections.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5254
/* Handle an LE credit-based Connection Response.  On success the
 * channel adopts the remote's dcid, MTU, MPS and initial credits and
 * becomes ready.  Security failures bump the required security level
 * and retry via SMP (unless already at MITM level); any other result
 * deletes the channel.
 *
 * Locking: conn->chan_lock protects the ident lookup, then the channel
 * itself is locked for the state change.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum MTU/MPS for LE CoC; dcid must be a valid
	 * LE dynamic CID.
	 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid that is already in use on this link */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5341
5342 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5343 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5344 u8 *data)
5345 {
5346 int err = 0;
5347
5348 switch (cmd->code) {
5349 case L2CAP_COMMAND_REJ:
5350 l2cap_command_rej(conn, cmd, cmd_len, data);
5351 break;
5352
5353 case L2CAP_CONN_REQ:
5354 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5355 break;
5356
5357 case L2CAP_CONN_RSP:
5358 case L2CAP_CREATE_CHAN_RSP:
5359 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5360 break;
5361
5362 case L2CAP_CONF_REQ:
5363 err = l2cap_config_req(conn, cmd, cmd_len, data);
5364 break;
5365
5366 case L2CAP_CONF_RSP:
5367 l2cap_config_rsp(conn, cmd, cmd_len, data);
5368 break;
5369
5370 case L2CAP_DISCONN_REQ:
5371 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5372 break;
5373
5374 case L2CAP_DISCONN_RSP:
5375 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5376 break;
5377
5378 case L2CAP_ECHO_REQ:
5379 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5380 break;
5381
5382 case L2CAP_ECHO_RSP:
5383 break;
5384
5385 case L2CAP_INFO_REQ:
5386 err = l2cap_information_req(conn, cmd, cmd_len, data);
5387 break;
5388
5389 case L2CAP_INFO_RSP:
5390 l2cap_information_rsp(conn, cmd, cmd_len, data);
5391 break;
5392
5393 case L2CAP_CREATE_CHAN_REQ:
5394 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5395 break;
5396
5397 case L2CAP_MOVE_CHAN_REQ:
5398 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5399 break;
5400
5401 case L2CAP_MOVE_CHAN_RSP:
5402 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5403 break;
5404
5405 case L2CAP_MOVE_CHAN_CFM:
5406 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5407 break;
5408
5409 case L2CAP_MOVE_CHAN_CFM_RSP:
5410 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5411 break;
5412
5413 default:
5414 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5415 err = -EINVAL;
5416 break;
5417 }
5418
5419 return err;
5420 }
5421
/* Handle an LE credit-based Connection Request.  Finds a listening
 * channel for the PSM, validates security and the requested scid, spins
 * up a new child channel with the remote's flow-control parameters and
 * responds -- unless the socket defers setup, in which case the
 * response is sent later by the owner (signalled via L2CAP_CR_PEND,
 * which is repurposed internally; it is never put on the wire here).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum legal MTU/MPS for LE CoC */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5547
/* Handle an LE Flow Control Credit packet: add the granted credits to
 * the channel's tx budget and resume sending.  A peer that would push
 * the total over LE_FLOWCTL_MAX_CREDITS is misbehaving and gets
 * disconnected (returning 0 there avoids an extra Command Reject).
 *
 * l2cap_get_chan_by_dcid() returns the channel locked.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5593
5594 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5595 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5596 u8 *data)
5597 {
5598 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5599 struct l2cap_chan *chan;
5600
5601 if (cmd_len < sizeof(*rej))
5602 return -EPROTO;
5603
5604 mutex_lock(&conn->chan_lock);
5605
5606 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5607 if (!chan)
5608 goto done;
5609
5610 l2cap_chan_lock(chan);
5611 l2cap_chan_del(chan, ECONNREFUSED);
5612 l2cap_chan_unlock(chan);
5613
5614 done:
5615 mutex_unlock(&conn->chan_lock);
5616 return 0;
5617 }
5618
5619 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5620 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5621 u8 *data)
5622 {
5623 int err = 0;
5624
5625 switch (cmd->code) {
5626 case L2CAP_COMMAND_REJ:
5627 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5628 break;
5629
5630 case L2CAP_CONN_PARAM_UPDATE_REQ:
5631 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5632 break;
5633
5634 case L2CAP_CONN_PARAM_UPDATE_RSP:
5635 break;
5636
5637 case L2CAP_LE_CONN_RSP:
5638 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5639 break;
5640
5641 case L2CAP_LE_CONN_REQ:
5642 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5643 break;
5644
5645 case L2CAP_LE_CREDITS:
5646 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5647 break;
5648
5649 case L2CAP_DISCONN_REQ:
5650 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5651 break;
5652
5653 case L2CAP_DISCONN_RSP:
5654 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5655 break;
5656
5657 default:
5658 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5659 err = -EINVAL;
5660 break;
5661 }
5662
5663 return err;
5664 }
5665
/* Process an skb received on the LE signaling channel.  Unlike BR/EDR,
 * LE carries a single command per PDU.  The skb is always consumed.
 *
 * NOTE(review): the "Wrong link type" error text is misleading -- err
 * comes from command processing, not from a link-type check.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The advertised length must exactly cover the remaining data,
	 * and ident 0 is reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5706
/* Process an skb received on the BR/EDR signaling channel.  A single
 * PDU may carry several commands back to back; iterate until the data
 * is exhausted or a corrupted header is seen.  The skb is always
 * consumed.
 *
 * NOTE(review): the "Wrong link type" error text is misleading -- err
 * comes from command processing, not from a link-type check.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (sniffing) sockets a copy first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Stop at a command that claims more data than remains,
		 * or uses the reserved ident 0.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5755
/* Verify and strip the trailing CRC16 FCS of an ERTM/streaming frame.
 * Returns 0 when no FCS is in use or it matches, -EBADMSG on mismatch.
 *
 * The received FCS is read at skb->data + skb->len AFTER the trim:
 * skb_trim() only shrinks the length, so the two FCS bytes are still
 * present in the buffer right past the new end.  Our CRC is computed
 * over the L2CAP header (which sits hdr_size bytes before skb->data at
 * this point) plus the remaining payload.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5776
/* Answer a poll (P-bit) from the remote: send an RNR if we are locally
 * busy, then flush pending I-frames, and finally send an RR if the
 * required F-bit did not go out piggy-backed on any of those frames.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* Mark that an F-bit is owed; cleared by whichever frame
	 * actually carries it.
	 */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy: restart retransmission timing
	 * if frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5810
/* Append new_frag to skb's fragment list and keep the caller's tail
 * pointer up to date.
 *
 * On the very first fragment, *last_frag points at the head skb itself,
 * so (*last_frag)->next is the head's (otherwise unused) next pointer;
 * subsequent fragments chain through frag->next.  Head accounting
 * (len/data_len/truesize) is updated so the skb describes the whole
 * SDU.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5829
/* Reassemble a segmented SDU from ERTM/streaming I-frames according to
 * the frame's SAR bits, delivering complete SDUs via chan->ops->recv().
 *
 * Ownership: skb is set to NULL once it has been handed to recv() or
 * parked/chained in chan->sdu, so the error path at the bottom only
 * frees what this function still owns.  Any SAR protocol violation
 * (unexpected start/continue/end, oversized or short SDU) leaves
 * err == -EINVAL/-EMSGSIZE and discards the partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* Start segment may not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Running total must stay below the announced length */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final size must match the announced length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard both the offending frame (if still owned) and
		 * any partially reassembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5914
/* Re-segment queued outgoing data (e.g. after an MPS change on a
 * channel move).  Placeholder: not implemented yet, always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5920
5921 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5922 {
5923 u8 event;
5924
5925 if (chan->mode != L2CAP_MODE_ERTM)
5926 return;
5927
5928 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5929 l2cap_tx(chan, NULL, NULL, event);
5930 }
5931
/* Drain the SREJ queue after retransmissions filled the gaps: hand
 * frames to the reassembler in sequence order until the next gap, then
 * go back to normal receive state (with an ack) once the queue is
 * empty.  Stops early if reassembly fails or we become locally busy.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5965
/* Handle an incoming SREJ (Selective Reject) S-frame by retransmitting
 * the single I-frame identified by reqseq.  Protocol violations (a
 * reqseq that was never sent, or the retry limit being exceeded) tear
 * the channel down with ECONNRESET.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next (not yet transmitted) seq is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: the response must carry F=1 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F=1 frame answers an
			 * SREJ that was already acted upon for the same
			 * reqseq (CONN_SREJ_ACT set).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6023
/* Handle an incoming REJ (Reject) S-frame by retransmitting all
 * unacknowledged I-frames starting at reqseq.  Protocol violations
 * (unsent reqseq, retry limit exceeded) tear the channel down.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next (not yet transmitted) seq is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1: only retransmit if this REJ was not already acted on */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6060
/* Classify the TxSeq of a received I-frame relative to the receive
 * window and any outstanding SREJ recovery state.  The returned
 * L2CAP_TXSEQ_* code tells the RX state machine whether the frame is
 * the expected one, a duplicate, unexpected (gap), or invalid.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6146
/* RX state machine handler for the normal RECV state.  Processes
 * I-frames (delivering in-sequence data and starting SREJ recovery on
 * a sequence gap) and RR/RNR/REJ/SREJ S-frames.  Any frame that is
 * neither delivered nor queued is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	/* Set once skb ownership passes to reassembly or the SREJ queue,
	 * so the frame is not freed at the bottom of this function.
	 */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; still process the ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6280
/* RX state machine handler while SREJ recovery is in progress: frames
 * are buffered in srej_q until the requested retransmissions arrive
 * and the sequence can be replayed in order.  Any frame that is not
 * queued is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	/* Set once skb ownership passes to the SREJ queue */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The frame we asked for has arrived; drop it from
			 * the pending-SREJ list and try to replay buffered
			 * frames in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* P=1 requires an F=1 response */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6423
6424 static int l2cap_finish_move(struct l2cap_chan *chan)
6425 {
6426 BT_DBG("chan %p", chan);
6427
6428 chan->rx_state = L2CAP_RX_STATE_RECV;
6429
6430 if (chan->hs_hcon)
6431 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6432 else
6433 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6434
6435 return l2cap_resegment(chan);
6436 }
6437
/* RX handler while waiting for a P=1 (poll) S-frame during a channel
 * move.  Rewinds the transmit state to the peer's reqseq, finishes
 * the move, answers with F=1, and then lets the normal RECV handler
 * process the (non-I-frame) event.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll frame is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for a poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6475
/* RX handler while waiting for an F=1 (final) frame during a channel
 * move.  Rewinds the transmit state to the peer's reqseq, refreshes
 * the link MTU for the new transport, resegments, and then processes
 * the frame through the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a final frame is acceptable in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* An AMP (high-speed) link reports its MTU as block_mtu */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6513
6514 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6515 {
6516 /* Make sure reqseq is for a packet that has been sent but not acked */
6517 u16 unacked;
6518
6519 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6520 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6521 }
6522
6523 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6524 struct sk_buff *skb, u8 event)
6525 {
6526 int err = 0;
6527
6528 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6529 control, skb, event, chan->rx_state);
6530
6531 if (__valid_reqseq(chan, control->reqseq)) {
6532 switch (chan->rx_state) {
6533 case L2CAP_RX_STATE_RECV:
6534 err = l2cap_rx_state_recv(chan, control, skb, event);
6535 break;
6536 case L2CAP_RX_STATE_SREJ_SENT:
6537 err = l2cap_rx_state_srej_sent(chan, control, skb,
6538 event);
6539 break;
6540 case L2CAP_RX_STATE_WAIT_P:
6541 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6542 break;
6543 case L2CAP_RX_STATE_WAIT_F:
6544 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6545 break;
6546 default:
6547 /* shut it down */
6548 break;
6549 }
6550 } else {
6551 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6552 control->reqseq, chan->next_tx_seq,
6553 chan->expected_ack_seq);
6554 l2cap_send_disconn_req(chan, ECONNRESET);
6555 }
6556
6557 return err;
6558 }
6559
/* Receive an I-frame in streaming mode.  Expected frames are passed
 * to reassembly; on any sequence gap the partial SDU and the frame
 * are simply dropped, since streaming mode has no retransmission.
 * Always returns 0 (frames are never held for later).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence: discard any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received frame's sequence number */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
6595
/* Entry point for ERTM/streaming mode data frames.  Validates FCS,
 * payload length, and F/P bit combinations, then dispatches I-frames
 * and S-frames into the RX state machine.  Always consumes @skb.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU-length header and FCS trailer */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Apply any attached socket filter before state processing */
	if ((chan->mode == L2CAP_MODE_ERTM ||
	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
		goto drop;

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit supervisory function to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6687
/* Return LE flow-control credits to the remote device.  Credits are
 * only replenished once the available amount drops below half the
 * initial allotment, so we don't send one credit packet per PDU.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	/* Top the peer back up to the full initial amount */
	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6713
6714 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6715 {
6716 int err;
6717
6718 if (!chan->rx_credits) {
6719 BT_ERR("No credits to receive LE L2CAP data");
6720 l2cap_send_disconn_req(chan, ECONNRESET);
6721 return -ENOBUFS;
6722 }
6723
6724 if (chan->imtu < skb->len) {
6725 BT_ERR("Too big LE L2CAP PDU");
6726 return -ENOBUFS;
6727 }
6728
6729 chan->rx_credits--;
6730 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6731
6732 l2cap_chan_le_send_credits(chan);
6733
6734 err = 0;
6735
6736 if (!chan->sdu) {
6737 u16 sdu_len;
6738
6739 sdu_len = get_unaligned_le16(skb->data);
6740 skb_pull(skb, L2CAP_SDULEN_SIZE);
6741
6742 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6743 sdu_len, skb->len, chan->imtu);
6744
6745 if (sdu_len > chan->imtu) {
6746 BT_ERR("Too big LE L2CAP SDU length received");
6747 err = -EMSGSIZE;
6748 goto failed;
6749 }
6750
6751 if (skb->len > sdu_len) {
6752 BT_ERR("Too much LE L2CAP data received");
6753 err = -EINVAL;
6754 goto failed;
6755 }
6756
6757 if (skb->len == sdu_len)
6758 return chan->ops->recv(chan, skb);
6759
6760 chan->sdu = skb;
6761 chan->sdu_len = sdu_len;
6762 chan->sdu_last_frag = skb;
6763
6764 return 0;
6765 }
6766
6767 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6768 chan->sdu->len, skb->len, chan->sdu_len);
6769
6770 if (chan->sdu->len + skb->len > chan->sdu_len) {
6771 BT_ERR("Too much LE L2CAP data received");
6772 err = -EINVAL;
6773 goto failed;
6774 }
6775
6776 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6777 skb = NULL;
6778
6779 if (chan->sdu->len == chan->sdu_len) {
6780 err = chan->ops->recv(chan, chan->sdu);
6781 if (!err) {
6782 chan->sdu = NULL;
6783 chan->sdu_last_frag = NULL;
6784 chan->sdu_len = 0;
6785 }
6786 }
6787
6788 failed:
6789 if (err) {
6790 kfree_skb(skb);
6791 kfree_skb(chan->sdu);
6792 chan->sdu = NULL;
6793 chan->sdu_last_frag = NULL;
6794 chan->sdu_len = 0;
6795 }
6796
6797 /* We can't return an error here since we took care of the skb
6798 * freeing internally. An error return would cause the caller to
6799 * do a double-free of the skb.
6800 */
6801 return 0;
6802 }
6803
/* Route an incoming data frame to the channel identified by @cid and
 * hand it to the mode-specific receive path.  Consumes @skb on every
 * path.  The matched channel is locked by l2cap_get_chan_by_scid()
 * (or explicitly for a freshly created A2MP channel) and unlocked
 * before returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* First A2MP frame creates the channel on demand */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* Non-zero return means recv did not take ownership */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6877
/* Deliver a connectionless (G-frame) packet to the channel bound to
 * @psm, if any.  Only valid on BR/EDR (ACL) links.  Consumes @skb on
 * every path and drops the channel reference taken by the global
 * lookup.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Global lookup takes a channel reference; released below */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* Zero return means recv took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6914
/* Demultiplex a complete L2CAP frame to the signaling, connectionless,
 * LE signaling, or data channel handlers.  Frames arriving before the
 * HCI link reaches BT_CONNECTED are queued and replayed later by
 * process_pending_rx().  Consumes @skb on every path.
 *
 * NOTE(review): lh->cid/lh->len are read assuming at least a full
 * L2CAP header is present; presumably the ACL reassembly layer
 * guarantees this before calling here — confirm against the caller.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6969
6970 static void process_pending_rx(struct work_struct *work)
6971 {
6972 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6973 pending_rx_work);
6974 struct sk_buff *skb;
6975
6976 BT_DBG("");
6977
6978 while ((skb = skb_dequeue(&conn->pending_rx)))
6979 l2cap_recv_frame(conn, skb);
6980 }
6981
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.  Allocates the HCI channel and conn structure, selects
 * the link MTU, advertises the locally supported fixed channels, and
 * initializes locks, lists and work items.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		/* le_mtu == 0 means the controller shares the ACL buffers */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7048
7049 static bool is_valid_psm(u16 psm, u8 dst_type) {
7050 if (!psm)
7051 return false;
7052
7053 if (bdaddr_type_is_le(dst_type))
7054 return (psm <= 0x00ff);
7055
7056 /* PSM must be odd and lsb of upper byte must be 0 */
7057 return ((psm & 0x0101) == 0x0001);
7058 }
7059
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 *
 * Validates the PSM/CID for the channel type, brings up (or reuses)
 * the underlying ACL or LE link, and attaches the channel to the
 * resulting l2cap_conn.  On success the channel moves to BT_CONNECT
 * (or directly towards BT_CONNECTED if the link already exists);
 * completion is reported asynchronously via the connect_cfm path.
 *
 * Returns 0 on success or a negative errno: -EHOSTUNREACH if no local
 * controller can route to @dst, -EINVAL for bad PSM/CID combinations,
 * -EISCONN/-EBADFD for invalid channel states, -ENOMEM if no
 * l2cap_conn could be attached, -EBUSY if the requested DCID is
 * already in use on the connection.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	/* hci_get_route() returns the hdev with a reference held;
	 * released via hci_dev_put() at "done".
	 */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels need neither PSM nor CID; everything else needs
	 * at least one of a valid PSM or a fixed CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels are identified by PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels are identified by CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot initiate scanning, so use a
		 * direct connection; otherwise go through the connect-by-scan
		 * path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Reject an explicit DCID that is already taken on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, skip waiting for connect_cfm and
	 * advance the channel state machine immediately.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7221 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7222
7223 /* ---- L2CAP interface with lower layer (HCI) ---- */
7224
7225 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7226 {
7227 int exact = 0, lm1 = 0, lm2 = 0;
7228 struct l2cap_chan *c;
7229
7230 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7231
7232 /* Find listening sockets and check their link_mode */
7233 read_lock(&chan_list_lock);
7234 list_for_each_entry(c, &chan_list, global_l) {
7235 if (c->state != BT_LISTEN)
7236 continue;
7237
7238 if (!bacmp(&c->src, &hdev->bdaddr)) {
7239 lm1 |= HCI_LM_ACCEPT;
7240 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7241 lm1 |= HCI_LM_MASTER;
7242 exact++;
7243 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7244 lm2 |= HCI_LM_ACCEPT;
7245 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7246 lm2 |= HCI_LM_MASTER;
7247 }
7248 }
7249 read_unlock(&chan_list_lock);
7250
7251 return exact ? lm1 : lm2;
7252 }
7253
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A returned channel is handed back with a reference held
 * (l2cap_chan_hold); the caller must drop it with l2cap_chan_put().
 * The reference is taken while still under chan_list_lock so the
 * channel cannot go away between the lookup and the caller's use.
 *
 * NOTE(review): continuation assumes the previous channel @c is still
 * on chan_list (the caller's held reference keeps the object alive,
 * and fixed listening channels are presumably not unlinked while
 * referenced) — list_next_entry() on an unlinked node would be unsafe.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the given channel, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match channels bound to this link's source address or
		 * to the wildcard address.
		 */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7289
/* HCI callback: an outgoing or incoming ACL/LE link has completed
 * (successfully or not).  On failure the L2CAP connection is torn
 * down; on success an l2cap_conn is attached to the link and every
 * listening fixed channel is offered the new connection.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL (BR/EDR) and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* new_connection() gives us a per-link instance of the
		 * listening channel; NULL means the owner declined.
		 */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the next listener before dropping our hold on
		 * pchan — l2cap_global_fixed_chan() continues from it.
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7350
7351 int l2cap_disconn_ind(struct hci_conn *hcon)
7352 {
7353 struct l2cap_conn *conn = hcon->l2cap_data;
7354
7355 BT_DBG("hcon %p", hcon);
7356
7357 if (!conn)
7358 return HCI_ERROR_REMOTE_USER_TERM;
7359 return conn->disc_reason;
7360 }
7361
7362 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7363 {
7364 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7365 return;
7366
7367 BT_DBG("hcon %p reason %d", hcon, reason);
7368
7369 l2cap_conn_del(hcon, bt_to_errno(reason));
7370 }
7371
7372 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7373 {
7374 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7375 return;
7376
7377 if (encrypt == 0x00) {
7378 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7379 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7380 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7381 chan->sec_level == BT_SECURITY_FIPS)
7382 l2cap_chan_close(chan, ECONNREFUSED);
7383 } else {
7384 if (chan->sec_level == BT_SECURITY_MEDIUM)
7385 __clear_chan_timer(chan);
7386 }
7387 }
7388
/* HCI callback: the security (authentication/encryption) state of the
 * underlying link changed.  @status != 0 means the security procedure
 * failed.  Walk every channel on the connection and advance or abort
 * its state machine accordingly.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels manage their own security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security succeeded and link is encrypted: adopt the
		 * link's security level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels without a pending security procedure */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume (or get closed/timed
		 * by l2cap_check_encryption if encryption dropped).
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connection was waiting for security:
			 * either proceed or arm the disconnect timer.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			/* Incoming BR/EDR connection deferred for
			 * security: answer the pending Connect Request
			 * now that the outcome is known.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let user space authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, immediately kick off configuration
			 * if we have not sent a Configure Request yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7479
/* Entry point for ACL data from the HCI layer.  Reassembles HCI ACL
 * fragments (@flags distinguishes start and continuation packets)
 * into complete L2CAP frames, which are handed to l2cap_recv_frame().
 *
 * Ownership: this function always consumes @skb — either by passing a
 * complete frame to l2cap_recv_frame() or by freeing it at "drop".
 * Partial reassembly state lives in conn->rx_skb / conn->rx_len.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it and mark
		 * the connection unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remember how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the frame announced in the
		 * L2CAP header: abort reassembly.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7583
/* Callbacks registered with the HCI core (hci_register_cb) so L2CAP
 * is notified of link setup, teardown and security changes.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7590
7591 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7592 {
7593 struct l2cap_chan *c;
7594
7595 read_lock(&chan_list_lock);
7596
7597 list_for_each_entry(c, &chan_list, global_l) {
7598 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7599 &c->src, c->src_type, &c->dst, c->dst_type,
7600 c->state, __le16_to_cpu(c->psm),
7601 c->scid, c->dcid, c->imtu, c->omtu,
7602 c->sec_level, c->mode);
7603 }
7604
7605 read_unlock(&chan_list_lock);
7606
7607 return 0;
7608 }
7609
/* debugfs open handler: bind the seq_file show callback */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7614
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7621
7622 static struct dentry *l2cap_debugfs;
7623
/* Register the L2CAP layer: socket interface, HCI callbacks and
 * (optionally) debugfs entries.  Returns 0 on success or the negative
 * errno from l2cap_init_sockets().
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	/* Debugfs is optional; skip the entries if the bluetooth root
	 * directory was not created.
	 */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	/* Tunables for LE credit-based flow control */
	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
			   &le_max_credits);
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
			   &le_default_mps);

	return 0;
}
7647
/* Unregister the L2CAP layer; reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7654
7655 module_param(disable_ertm, bool, 0644);
7656 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");