]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - net/bluetooth/l2cap_core.c
Merge branch 'rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild
[mirror_ubuntu-jammy-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41
42 bool disable_ertm;
43
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
46
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
49
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
53 void *data);
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
57
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
60
61 /* ---- L2CAP channels ---- */
62
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 struct l2cap_chan *c;
66
67 list_for_each_entry(c, &conn->chan_l, list) {
68 if (c->dcid == cid)
69 return c;
70 }
71 return NULL;
72 }
73
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 struct l2cap_chan *c;
77
78 list_for_each_entry(c, &conn->chan_l, list) {
79 if (c->scid == cid)
80 return c;
81 }
82 return NULL;
83 }
84
/* Find channel with given SCID.
 * Returns the channel locked via l2cap_chan_lock(); the caller is
 * responsible for unlocking it.  NULL when no channel matches.
 * NOTE(review): the returned channel is locked but not
 * reference-counted here — confirm callers cannot race with channel
 * destruction after conn->chan_lock is dropped.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
99
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 struct l2cap_chan *c;
103
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
106 return c;
107 }
108 return NULL;
109 }
110
/* Find a channel on the global list bound to PSM @psm with source
 * address @src.  Caller holds chan_list_lock (cf. l2cap_add_psm()).
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* sport is the PSM the socket was bound to */
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
121
/* Bind @chan to @psm on source address @src, or pick a free dynamic
 * PSM when @psm is 0.  Dynamic PSMs are scanned over 0x1001..0x10ff in
 * steps of two.  Returns 0 on success, -EADDRINUSE when the requested
 * PSM is already bound on @src, -EINVAL when the dynamic range is
 * exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
154
/* Assign a fixed source CID to @chan.  Always returns 0. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
165
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 u16 cid = L2CAP_CID_DYN_START;
169
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
172 return cid;
173 }
174
175 return 0;
176 }
177
/* Transition @chan to @state and notify the owner via ops->state_change.
 * Callers serialize this (cf. l2cap_state_change(), which takes the
 * socket lock around it).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
186
/* Socket-locked wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
195
/* Record @err on the channel's socket.  Callers hold the socket lock
 * (cf. l2cap_chan_set_err()).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
202
/* Socket-locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
211
/* Arm the ERTM retransmission timer — but only if the monitor timer is
 * not already pending and a retransmission timeout was negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220
/* Arm the ERTM monitor timer (if a monitor timeout was negotiated).
 * The retransmission timer is cleared first.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229
/* Return the skb in @head whose ERTM txseq equals @seq, or NULL when
 * no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
						u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
242
243 /* ---- L2CAP sequence number lists ---- */
244
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
251 * allocs or frees.
252 */
253
/* Allocate and reset a sequence-number list able to hold @size entries.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 * @size is a u16, so the multiplication below cannot overflow
	 * size_t.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask doubles as both index mask and "array fully clear" state */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
276
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281
/* Test whether @seq is currently linked in the list, in O(1). */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288
/* Remove @seq from the list and return it.  Removing the head is O(1);
 * removing an interior element walks the links from the head.  Returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or @seq is not linked.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* Head was the only element; list is now empty */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322
/* Pop and return the list head (L2CAP_SEQ_LIST_CLEAR when empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 u16 i;
332
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 return;
335
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342
/* Append @seq to the tail of the list in O(1).  A sequence number that
 * is already linked is left where it is (appending is idempotent).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the reference the
 * timer held on the channel.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close is invoked without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
390
/* Allocate a new channel, link it on the global channel list and take
 * the initial reference.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC — confirm whether any caller really
	 * runs in atomic context before relaxing this to GFP_KERNEL.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	atomic_set(&chan->refcnt, 1);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
418
/* Unlink @chan from the global channel list and drop the reference
 * taken in l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
427
/* Reset FCS, ERTM transmit parameters and the security level on @chan
 * to their protocol defaults, and mark the channel force-active.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
439
/* Attach @chan to @conn: assign source/destination CIDs and MTUs
 * according to the channel type, install best-effort flow-spec
 * defaults, take a channel reference and link the channel on the
 * connection's channel list.  Caller holds conn->chan_lock (cf.
 * l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort flow specification */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
495
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
502
503 void l2cap_chan_del(struct l2cap_chan *chan, int err)
504 {
505 struct l2cap_conn *conn = chan->conn;
506
507 __clear_chan_timer(chan);
508
509 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
510
511 if (conn) {
512 /* Delete from channel list */
513 list_del(&chan->list);
514
515 l2cap_chan_put(chan);
516
517 chan->conn = NULL;
518
519 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
520 hci_conn_put(conn->hcon);
521 }
522
523 if (chan->ops->teardown)
524 chan->ops->teardown(chan, err);
525
526 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
527 return;
528
529 switch(chan->mode) {
530 case L2CAP_MODE_BASIC:
531 break;
532
533 case L2CAP_MODE_ERTM:
534 __clear_retrans_timer(chan);
535 __clear_monitor_timer(chan);
536 __clear_ack_timer(chan);
537
538 skb_queue_purge(&chan->srej_q);
539
540 l2cap_seq_list_free(&chan->srej_list);
541 l2cap_seq_list_free(&chan->retrans_list);
542
543 /* fall through */
544
545 case L2CAP_MODE_STREAMING:
546 skb_queue_purge(&chan->tx_q);
547 break;
548 }
549
550 return;
551 }
552
/* Close @chan for @reason, emitting whatever signalling its current
 * state requires: a disconnect request when connected/configuring on
 * ACL, a connection reject while still in BT_CONNECT2.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Give the peer time to answer before the timer fires */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
611
/* Map channel type, PSM and security level to the HCI authentication
 * requirement used to secure the link.  Side effect: an SDP channel at
 * BT_SECURITY_LOW is raised to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
642
/* Service level security: request link security matching the channel's
 * security level and derived authentication requirement.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
653
/* Allocate the next signalling identifier for @conn, wrapping within
 * the kernel-reserved range 1..128.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
675
/* Build and transmit one signalling command on @conn.  Commands go out
 * at maximum priority, non-flushable when the controller supports it.
 * The command is silently dropped if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
696
/* Hand an outbound data frame for @chan to the HCI layer, deriving the
 * ACL flush semantics from the channel's flushable flag and the
 * controller's no-flush capability.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
714
/* Decode a 16-bit enhanced control field into @control.  Fields that
 * do not apply to the decoded frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
738
/* Decode a 32-bit extended control field into @control.  Fields that
 * do not apply to the decoded frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
762
763 static inline void __unpack_control(struct l2cap_chan *chan,
764 struct sk_buff *skb)
765 {
766 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
767 __unpack_extended_control(get_unaligned_le32(skb->data),
768 &bt_cb(skb)->control);
769 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
770 } else {
771 __unpack_enhanced_control(get_unaligned_le16(skb->data),
772 &bt_cb(skb)->control);
773 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
774 }
775 }
776
/* Encode @control into a 32-bit extended control field. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
795
/* Encode @control into a 16-bit enhanced control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
814
815 static inline void __pack_control(struct l2cap_chan *chan,
816 struct l2cap_ctrl *control,
817 struct sk_buff *skb)
818 {
819 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
820 put_unaligned_le32(__pack_extended_control(control),
821 skb->data + L2CAP_HDR_SIZE);
822 } else {
823 put_unaligned_le16(__pack_enhanced_control(control),
824 skb->data + L2CAP_HDR_SIZE);
825 }
826 }
827
828 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
829 {
830 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
831 return L2CAP_EXT_HDR_SIZE;
832 else
833 return L2CAP_ENH_HDR_SIZE;
834 }
835
/* Build an S-frame PDU for @chan carrying the packed @control field,
 * appending a CRC16 FCS when the channel uses one.  Returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width follows the channel's extended-control flag */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header and control field written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
868
/* Transmit an S-frame built from @control, folding in a pending final
 * bit and keeping RNR/ack bookkeeping up to date.  @control may be
 * modified (final bit).  Does nothing if @control is not an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
				struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A queued F-bit is carried by the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
			!control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* RR/RNR/REJ acknowledge up to reqseq */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
		control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
906
/* Send an RR S-frame — or RNR when we are locally busy — acknowledging
 * buffer_seq.  @poll sets the P bit.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
925
/* True when no connect request is outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
930
/* Send an L2CAP connection request for @chan and mark the request
 * pending.  The allocated signalling ident is stored on the channel so
 * the response can be matched.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
945
/* Move @chan to BT_CONNECTED and notify its owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
956
/* Begin channel establishment.  LE channels become ready immediately.
 * On BR/EDR the connect request is only sent once the remote feature
 * mask is known and security is satisfied; if the feature mask has not
 * been requested yet, an information request is issued first.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait for the feature-mask exchange to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
986
987 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
988 {
989 u32 local_feat_mask = l2cap_feat_mask;
990 if (!disable_ertm)
991 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
992
993 switch (mode) {
994 case L2CAP_MODE_ERTM:
995 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
996 case L2CAP_MODE_STREAMING:
997 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
998 default:
999 return 0x00;
1000 }
1001 }
1002
/* Send a disconnect request for @chan, stop any running ERTM timers,
 * move the channel to BT_DISCONN and record @err on its socket.  A2MP
 * channels only change state — no request is sent for them.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1032
1033 /* ---- L2CAP connections ---- */
/* Drive connection-oriented channels forward once the link is usable:
 * issue connect requests for channels in BT_CONNECT and answer the
 * pending incoming request for channels in BT_CONNECT2, starting
 * configuration when the response is a success.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security not yet settled or request already out */
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on an unsupported mode */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must accept first */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Configuration starts only on a fresh success */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1116
/* Find a global channel matching @cid (and optionally @state) with
 * source/destination addresses closest to @src/@dst.  An exact address
 * match wins; otherwise the last BDADDR_ANY wildcard match is returned.
 * NOTE(review): despite the historical "locked" wording, the returned
 * channel is neither locked nor reference-counted here — the list lock
 * is dropped before returning.  Confirm callers' lifetime assumptions.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						bdaddr_t *src,
						bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1159
/* Accept an incoming LE connection: look up the channel listening on
 * the LE data CID, spawn a child channel from it, attach the child to
 * @conn and mark it ready.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* The child inherits the link's addresses */
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1198
/* Called when the underlying HCI link comes up: kick LE handling,
 * security, and the connect state machine of every channel on the
 * connection.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to the LE listener logic. */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start security at the pending level. */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP fixed channel has no connect/config phase. */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* LE channels are ready once security is in place. */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP
			 * connect handshake: mark them connected now.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1243
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Only channels that insisted on reliability see the
		 * error; the rest continue best-effort.
		 */
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1260
/* The remote never answered our information request: give up on the
 * feature-mask exchange and start pending connections anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1271
/* Tear down an L2CAP connection: kill every channel, release the HCI
 * channel, cancel timers and free the connection object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel survives until the
		 * close callback below has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* LE links may still have SMP pairing state to dispose of. */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1314
/* SMP pairing timed out: destroy the pairing state and bring the
 * whole connection down with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	/* test_and_clear avoids racing l2cap_conn_del(), which checks
	 * the same HCI_CONN_LE_SMP_PEND flag.
	 */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1327
/* Allocate and initialise the L2CAP connection object for an HCI
 * connection.  Returns the existing object if one is already attached,
 * NULL on failure, or NULL/existing when status is non-zero.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise a dedicated MTU; fall back to ACL MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE uses the SMP security timer; BR/EDR the info-request timer.
	 * The two delayed_works share storage, so only one may be used.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1376
1377 /* ---- Socket interface ---- */
1378
1379 /* Find socket with psm and source / destination bdaddr.
1380 * Returns closest match.
1381 */
1382 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1383 bdaddr_t *src,
1384 bdaddr_t *dst)
1385 {
1386 struct l2cap_chan *c, *c1 = NULL;
1387
1388 read_lock(&chan_list_lock);
1389
1390 list_for_each_entry(c, &chan_list, global_l) {
1391 struct sock *sk = c->sk;
1392
1393 if (state && c->state != state)
1394 continue;
1395
1396 if (c->psm == psm) {
1397 int src_match, dst_match;
1398 int src_any, dst_any;
1399
1400 /* Exact match. */
1401 src_match = !bacmp(&bt_sk(sk)->src, src);
1402 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1403 if (src_match && dst_match) {
1404 read_unlock(&chan_list_lock);
1405 return c;
1406 }
1407
1408 /* Closest match */
1409 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1410 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1411 if ((src_match && dst_any) || (src_any && dst_match) ||
1412 (src_any && dst_any))
1413 c1 = c;
1414 }
1415 }
1416
1417 read_unlock(&chan_list_lock);
1418
1419 return c1;
1420 }
1421
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates psm/cid/mode, creates (or reuses) the underlying ACL or LE
 * HCI connection, attaches the channel to it and kicks off the L2CAP
 * connect sequence.  Returns 0 on success or when a connect is already
 * in progress, negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming only when not administratively disabled. */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else is ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* An LE link carries at most one channel. */
	if (hcon->type == LE_LINK) {
		err = 0;

		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, so the channel lock
	 * must be dropped around it to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No L2CAP handshake needed for raw/CL channels. */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1560
/* Wait (interruptibly) until all outstanding ERTM I-frames have been
 * acked by the remote, or the channel loses its connection.  Returns 0
 * on success, a signal errno, or the socket error.
 *
 * NOTE(review): there is no overall deadline here - timeo (HZ/5) is
 * re-armed each pass, so a peer that never acks keeps the caller
 * waiting until a signal or socket error arrives.  Confirm this is the
 * intended shutdown behaviour.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the RX path
		 * can process incoming acks.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1592
1593 static void l2cap_monitor_timeout(struct work_struct *work)
1594 {
1595 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1596 monitor_timer.work);
1597
1598 BT_DBG("chan %p", chan);
1599
1600 l2cap_chan_lock(chan);
1601
1602 if (!chan->conn) {
1603 l2cap_chan_unlock(chan);
1604 l2cap_chan_put(chan);
1605 return;
1606 }
1607
1608 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1609
1610 l2cap_chan_unlock(chan);
1611 l2cap_chan_put(chan);
1612 }
1613
1614 static void l2cap_retrans_timeout(struct work_struct *work)
1615 {
1616 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1617 retrans_timer.work);
1618
1619 BT_DBG("chan %p", chan);
1620
1621 l2cap_chan_lock(chan);
1622
1623 if (!chan->conn) {
1624 l2cap_chan_unlock(chan);
1625 l2cap_chan_put(chan);
1626 return;
1627 }
1628
1629 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1630 l2cap_chan_unlock(chan);
1631 l2cap_chan_put(chan);
1632 }
1633
1634 static void l2cap_streaming_send(struct l2cap_chan *chan,
1635 struct sk_buff_head *skbs)
1636 {
1637 struct sk_buff *skb;
1638 struct l2cap_ctrl *control;
1639
1640 BT_DBG("chan %p, skbs %p", chan, skbs);
1641
1642 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1643
1644 while (!skb_queue_empty(&chan->tx_q)) {
1645
1646 skb = skb_dequeue(&chan->tx_q);
1647
1648 bt_cb(skb)->control.retries = 1;
1649 control = &bt_cb(skb)->control;
1650
1651 control->reqseq = 0;
1652 control->txseq = chan->next_tx_seq;
1653
1654 __pack_control(chan, control, skb);
1655
1656 if (chan->fcs == L2CAP_FCS_CRC16) {
1657 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1658 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1659 }
1660
1661 l2cap_do_send(chan, skb);
1662
1663 BT_DBG("Sent txseq %u", control->txseq);
1664
1665 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1666 chan->frames_sent++;
1667 }
1668 }
1669
/* Transmit as many queued I-frames as the remote TX window allows.
 * Returns the number of frames sent, 0 when blocked (remote busy or
 * not in XMIT state), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback the F-bit if one is owed to the remote. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acks everything received so far. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original stays queued until
		 * acked so it can be retransmitted.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1736
/* Retransmit every sequence number queued on retrans_list.  The
 * channel is disconnected if a frame exceeds its max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retransmissions. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit in the local control copy. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* The control field changed, so the FCS must be recomputed. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1812
/* Queue a single sequence number (from an incoming SREJ) for
 * retransmission and push it out immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1821
/* Retransmit all unacked I-frames starting at control->reqseq, up to
 * but excluding the first not-yet-sent frame (tx_send_head).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the remote must be answered with the F-bit set. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to the unsent frames. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1855
/* Acknowledge received I-frames: piggyback on outgoing data when
 * possible, send an RR/RNR S-frame when needed, or (re)arm the ack
 * timer to batch the ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the remote to pause with an RNR. */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise defer the ack in the hope of batching it. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1905
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the head skb, any overflow into a chain of fragment
 * skbs no larger than the HCI MTU.  Returns bytes consumed or a
 * negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1950
/* Build a connectionless PDU: L2CAP header + PSM + payload.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* Head skb holds at most one HCI MTU; the rest is fragmented. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1984
/* Build a basic-mode PDU: plain L2CAP header + payload.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Head skb holds at most one HCI MTU; the rest is fragmented. */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2017
/* Build a single ERTM/streaming I-frame PDU.  The control field is
 * zeroed here and filled in at transmit time; space for the FCS is
 * accounted for but written later.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	/* Only the first segment of an SDU carries the SDU length. */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2071
/* Split an SDU from user space into one or more I-frame PDUs, each
 * sized to fit a single HCI fragment, and queue them on @seg_queue.
 * Returns 0 or a negative error (on error the queue is purged).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no segmentation needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments regain the SDU-length bytes. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2139
/* Entry point for sending user data on a channel.  Dispatches on the
 * channel type and mode; returns the number of bytes queued for
 * transmission or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		/* ERTM goes through the TX state machine; streaming
		 * sends immediately.
		 */
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2219
/* Send an SREJ for every frame between expected_tx_seq and txseq that
 * has not already been received out of order (queued in srej_q),
 * recording each requested seq in srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2242
2243 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2244 {
2245 struct l2cap_ctrl control;
2246
2247 BT_DBG("chan %p", chan);
2248
2249 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2250 return;
2251
2252 memset(&control, 0, sizeof(control));
2253 control.sframe = 1;
2254 control.super = L2CAP_SUPER_SREJ;
2255 control.reqseq = chan->srej_list.tail;
2256 l2cap_send_sframe(chan, &control);
2257 }
2258
/* Re-send an SREJ for every entry on srej_list up to (but excluding)
 * txseq, cycling the entries back onto the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		/* Re-request and re-queue; append keeps list ordering. */
		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2284
/* The remote has acked everything up to (but not including) @reqseq:
 * free the acked frames from the TX queue and stop the retransmission
 * timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2316
/* Abandon SREJ-based recovery: rewind the expected sequence to the
 * buffered position, drop out-of-order frames and pending SREJs, and
 * return to the normal RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2326
/* ERTM TX state machine, XMIT state: transmission is allowed and
 * frames are sent as events arrive.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends an RNR while local-busy is set. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR went out earlier: poll with RR so the
			 * remote resumes, then wait for the F-bit.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the remote for status. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2398
2399 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2400 struct l2cap_ctrl *control,
2401 struct sk_buff_head *skbs, u8 event)
2402 {
2403 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2404 event);
2405
2406 switch (event) {
2407 case L2CAP_EV_DATA_REQUEST:
2408 if (chan->tx_send_head == NULL)
2409 chan->tx_send_head = skb_peek(skbs);
2410 /* Queue data, but don't send. */
2411 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2412 break;
2413 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2414 BT_DBG("Enter LOCAL_BUSY");
2415 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2416
2417 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2418 /* The SREJ_SENT state must be aborted if we are to
2419 * enter the LOCAL_BUSY state.
2420 */
2421 l2cap_abort_rx_srej_sent(chan);
2422 }
2423
2424 l2cap_send_ack(chan);
2425
2426 break;
2427 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2428 BT_DBG("Exit LOCAL_BUSY");
2429 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2430
2431 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2432 struct l2cap_ctrl local_control;
2433 memset(&local_control, 0, sizeof(local_control));
2434 local_control.sframe = 1;
2435 local_control.super = L2CAP_SUPER_RR;
2436 local_control.poll = 1;
2437 local_control.reqseq = chan->buffer_seq;
2438 l2cap_send_sframe(chan, &local_control);
2439
2440 chan->retry_count = 1;
2441 __set_monitor_timer(chan);
2442 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2443 }
2444 break;
2445 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2446 l2cap_process_reqseq(chan, control->reqseq);
2447
2448 /* Fall through */
2449
2450 case L2CAP_EV_RECV_FBIT:
2451 if (control && control->final) {
2452 __clear_monitor_timer(chan);
2453 if (chan->unacked_frames > 0)
2454 __set_retrans_timer(chan);
2455 chan->retry_count = 0;
2456 chan->tx_state = L2CAP_TX_STATE_XMIT;
2457 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2458 }
2459 break;
2460 case L2CAP_EV_EXPLICIT_POLL:
2461 /* Ignore */
2462 break;
2463 case L2CAP_EV_MONITOR_TO:
2464 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2465 l2cap_send_rr_or_rnr(chan, 1);
2466 __set_monitor_timer(chan);
2467 chan->retry_count++;
2468 } else {
2469 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2470 }
2471 break;
2472 default:
2473 break;
2474 }
2475 }
2476
2477 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2478 struct sk_buff_head *skbs, u8 event)
2479 {
2480 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2481 chan, control, skbs, event, chan->tx_state);
2482
2483 switch (chan->tx_state) {
2484 case L2CAP_TX_STATE_XMIT:
2485 l2cap_tx_state_xmit(chan, control, skbs, event);
2486 break;
2487 case L2CAP_TX_STATE_WAIT_F:
2488 l2cap_tx_state_wait_f(chan, control, skbs, event);
2489 break;
2490 default:
2491 /* Ignore event */
2492 break;
2493 }
2494 }
2495
/* Feed the reqseq and F bit of a received frame's control field into the
 * transmit state machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2502
/* Feed only the F bit of a received frame's control field into the
 * transmit state machine (the reqseq is not processed).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2509
2510 /* Copy frame to all raw sockets on that connection */
2511 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2512 {
2513 struct sk_buff *nskb;
2514 struct l2cap_chan *chan;
2515
2516 BT_DBG("conn %p", conn);
2517
2518 mutex_lock(&conn->chan_lock);
2519
2520 list_for_each_entry(chan, &conn->chan_l, list) {
2521 struct sock *sk = chan->sk;
2522 if (chan->chan_type != L2CAP_CHAN_RAW)
2523 continue;
2524
2525 /* Don't send frame to the socket it came from */
2526 if (skb->sk == sk)
2527 continue;
2528 nskb = skb_clone(skb, GFP_ATOMIC);
2529 if (!nskb)
2530 continue;
2531
2532 if (chan->ops->recv(chan, nskb))
2533 kfree_skb(nskb);
2534 }
2535
2536 mutex_unlock(&conn->chan_lock);
2537 }
2538
/* ---- L2CAP signalling commands ---- */

/* Build a signalling PDU (L2CAP header + command header + payload) for
 * the signalling channel.  If the PDU does not fit in one buffer of
 * conn->mtu bytes, the remainder is chained as continuation fragments
 * on the skb's frag_list (fragments carry no L2CAP header).
 *
 * Returns the skb (owned by the caller), or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
					u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Total wire length; the first fragment is capped at the link MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use their own fixed signalling CID */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first buffer with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes of payload that did not fit in the first buffer */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head and every fragment chained so far */
	kfree_skb(skb);
	return NULL;
}
2602
/* Decode one configuration option at *ptr and advance *ptr past it.
 *
 * Values of 1, 2 or 4 bytes are returned by value in *val (multi-byte
 * values read little-endian, unaligned-safe); any other length yields a
 * pointer to the raw option payload in *val instead.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining buffer here — callers only bound the loop with
 * "len >= L2CAP_CONF_OPT_SIZE", so a forged length can make the payload
 * read run past the end of the option buffer.  Verify caller bounds
 * before trusting this on hostile input.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2635
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * Values of 1, 2 or 4 bytes are stored inline (little-endian,
 * unaligned-safe); for any other length, val is interpreted as a
 * pointer to len bytes of payload to copy.
 *
 * NOTE(review): no output-buffer bound is enforced here — every caller
 * must size its request/response buffer for the worst-case option set.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2665
2666 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2667 {
2668 struct l2cap_conf_efs efs;
2669
2670 switch (chan->mode) {
2671 case L2CAP_MODE_ERTM:
2672 efs.id = chan->local_id;
2673 efs.stype = chan->local_stype;
2674 efs.msdu = cpu_to_le16(chan->local_msdu);
2675 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2676 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2677 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2678 break;
2679
2680 case L2CAP_MODE_STREAMING:
2681 efs.id = 1;
2682 efs.stype = L2CAP_SERV_BESTEFFORT;
2683 efs.msdu = cpu_to_le16(chan->local_msdu);
2684 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2685 efs.acc_lat = 0;
2686 efs.flush_to = 0;
2687 break;
2688
2689 default:
2690 return;
2691 }
2692
2693 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2694 (unsigned long) &efs);
2695 }
2696
2697 static void l2cap_ack_timeout(struct work_struct *work)
2698 {
2699 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2700 ack_timer.work);
2701 u16 frames_to_ack;
2702
2703 BT_DBG("chan %p", chan);
2704
2705 l2cap_chan_lock(chan);
2706
2707 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2708 chan->last_acked_seq);
2709
2710 if (frames_to_ack)
2711 l2cap_send_rr_or_rnr(chan, 0);
2712
2713 l2cap_chan_unlock(chan);
2714 l2cap_chan_put(chan);
2715 }
2716
/* Reset a channel's sequence state and SDU reassembly state, and — for
 * ERTM mode only — initialize the rx/tx state machines, the retransmit
 * timers and the SREJ/retransmit sequence lists.
 *
 * Returns 0 on success or a negative errno if sequence-list allocation
 * fails (in which case any partially allocated list is freed).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs only the state reset above */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list so we don't leak it */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2756
2757 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2758 {
2759 switch (mode) {
2760 case L2CAP_MODE_STREAMING:
2761 case L2CAP_MODE_ERTM:
2762 if (l2cap_mode_supported(mode, remote_feat_mask))
2763 return mode;
2764 /* fall through */
2765 default:
2766 return L2CAP_MODE_BASIC;
2767 }
2768 }
2769
/* Extended Window Size is usable only when High Speed support is
 * enabled locally and the remote advertised it in its feature mask.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
2774
/* Extended Flow Specification is usable only when High Speed support is
 * enabled locally and the remote advertised it in its feature mask.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
2779
2780 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2781 {
2782 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2783 __l2cap_ews_supported(chan)) {
2784 /* use extended control field */
2785 set_bit(FLAG_EXT_CTRL, &chan->flags);
2786 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2787 } else {
2788 chan->tx_win = min_t(u16, chan->tx_win,
2789 L2CAP_DEFAULT_TX_WINDOW);
2790 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2791 }
2792 chan->ack_win = chan->tx_win;
2793 }
2794
/* Build our Configure Request for @chan into @data.
 *
 * On the very first exchange this may also downgrade chan->mode to a
 * mode the remote supports.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only request a non-default MTU explicitly */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit RFC option is needed only if the remote
		 * could have expected another mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		/* Timeouts are filled in by the responder */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Cap the PDU size so a maximal frame still fits the link
		 * MTU with extended header, SDU length and FCS fields.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* The EWS option carries the full (extended) window */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
			     L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2916
/* Parse the peer's accumulated Configure Request (chan->conf_req,
 * chan->conf_len) and build our Configure Response into @data.
 *
 * Returns the response length, or a negative errno when the request is
 * unacceptable and the connection should be torn down.
 *
 * Options whose length does not match the expected encoding are now
 * ignored instead of interpreted: previously an EFS option with a bad
 * length still set remote_efs and later echoed the *uninitialized*
 * efs struct back to the peer (a stack data leak), and 1/2/4-byte
 * options with bogus lengths could be misread.  efs is also
 * zero-initialized because it is read below whenever FLAG_EFS_ENABLE
 * is set, even if the peer never actually sent an EFS option.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs = { 0 };
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Report unknown non-hint options back to the peer */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 device: the requested mode must match ours */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our link */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3136
3137 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3138 {
3139 struct l2cap_conf_req *req = data;
3140 void *ptr = req->data;
3141 int type, olen;
3142 unsigned long val;
3143 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3144 struct l2cap_conf_efs efs;
3145
3146 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3147
3148 while (len >= L2CAP_CONF_OPT_SIZE) {
3149 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3150
3151 switch (type) {
3152 case L2CAP_CONF_MTU:
3153 if (val < L2CAP_DEFAULT_MIN_MTU) {
3154 *result = L2CAP_CONF_UNACCEPT;
3155 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3156 } else
3157 chan->imtu = val;
3158 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3159 break;
3160
3161 case L2CAP_CONF_FLUSH_TO:
3162 chan->flush_to = val;
3163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3164 2, chan->flush_to);
3165 break;
3166
3167 case L2CAP_CONF_RFC:
3168 if (olen == sizeof(rfc))
3169 memcpy(&rfc, (void *)val, olen);
3170
3171 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3172 rfc.mode != chan->mode)
3173 return -ECONNREFUSED;
3174
3175 chan->fcs = 0;
3176
3177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3178 sizeof(rfc), (unsigned long) &rfc);
3179 break;
3180
3181 case L2CAP_CONF_EWS:
3182 chan->ack_win = min_t(u16, val, chan->ack_win);
3183 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3184 chan->tx_win);
3185 break;
3186
3187 case L2CAP_CONF_EFS:
3188 if (olen == sizeof(efs))
3189 memcpy(&efs, (void *)val, olen);
3190
3191 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3192 efs.stype != L2CAP_SERV_NOTRAFIC &&
3193 efs.stype != chan->local_stype)
3194 return -ECONNREFUSED;
3195
3196 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3197 sizeof(efs), (unsigned long) &efs);
3198 break;
3199 }
3200 }
3201
3202 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3203 return -ECONNREFUSED;
3204
3205 chan->mode = rfc.mode;
3206
3207 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3208 switch (rfc.mode) {
3209 case L2CAP_MODE_ERTM:
3210 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3211 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3212 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3213 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3214 chan->ack_win = min_t(u16, chan->ack_win,
3215 rfc.txwin_size);
3216
3217 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3218 chan->local_msdu = le16_to_cpu(efs.msdu);
3219 chan->local_sdu_itime =
3220 le32_to_cpu(efs.sdu_itime);
3221 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3222 chan->local_flush_to =
3223 le32_to_cpu(efs.flush_to);
3224 }
3225 break;
3226
3227 case L2CAP_MODE_STREAMING:
3228 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3229 }
3230 }
3231
3232 req->dcid = cpu_to_le16(chan->dcid);
3233 req->flags = __constant_cpu_to_le16(0);
3234
3235 return ptr - data;
3236 }
3237
/* Build a bare Configure Response (header only, no options) into @data
 * with the given result and flags.  Returns the response length.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	/* ptr still points just past the header: length is header size */
	return ptr - data;
}
3251
/* Send the deferred Connect Response for a channel whose acceptance was
 * delayed (deferred setup), then kick off configuration by sending our
 * Configure Request if one has not already gone out.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
		       L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller sends the initial Configure Request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3272
3273 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3274 {
3275 int type, olen;
3276 unsigned long val;
3277 /* Use sane default values in case a misbehaving remote device
3278 * did not send an RFC or extended window size option.
3279 */
3280 u16 txwin_ext = chan->ack_win;
3281 struct l2cap_conf_rfc rfc = {
3282 .mode = chan->mode,
3283 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3284 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3285 .max_pdu_size = cpu_to_le16(chan->imtu),
3286 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3287 };
3288
3289 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3290
3291 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3292 return;
3293
3294 while (len >= L2CAP_CONF_OPT_SIZE) {
3295 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3296
3297 switch (type) {
3298 case L2CAP_CONF_RFC:
3299 if (olen == sizeof(rfc))
3300 memcpy(&rfc, (void *)val, olen);
3301 break;
3302 case L2CAP_CONF_EWS:
3303 txwin_ext = val;
3304 break;
3305 }
3306 }
3307
3308 switch (rfc.mode) {
3309 case L2CAP_MODE_ERTM:
3310 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3311 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3312 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3313 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3314 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3315 else
3316 chan->ack_win = min_t(u16, chan->ack_win,
3317 rfc.txwin_size);
3318 break;
3319 case L2CAP_MODE_STREAMING:
3320 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3321 }
3322 }
3323
3324 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3325 {
3326 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3327
3328 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3329 return 0;
3330
3331 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3332 cmd->ident == conn->info_ident) {
3333 cancel_delayed_work(&conn->info_timer);
3334
3335 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3336 conn->info_ident = 0;
3337
3338 l2cap_conn_start(conn);
3339 }
3340
3341 return 0;
3342 }
3343
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, security-check the link, create the child channel, and send
 * the Connection Response (success, pending, or an error result).
 * May also trigger the initial feature-mask Information Request and the
 * first Configure Request.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock, then the parent socket */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Keep the ACL alive while the channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our source CID is the peer's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace must accept before we proceed */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery still in flight: answer "pending" */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start feature-mask discovery if we haven't yet */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
			       L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3464
/* Handle a Connection Response to our Connection Request: on success,
 * record the remote CID, move to BT_CONFIG and send our Configure
 * Request; on "pending", just mark the channel; on any error result,
 * tear the channel down.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid means the peer couldn't allocate a channel; match
	 * on the command ident instead.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Don't send a second Configure Request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3532
3533 static inline void set_default_fcs(struct l2cap_chan *chan)
3534 {
3535 /* FCS is enabled only in ERTM or streaming mode, if one or both
3536 * sides request it.
3537 */
3538 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3539 chan->fcs = L2CAP_FCS_NONE;
3540 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3541 chan->fcs = L2CAP_FCS_CRC16;
3542 }
3543
/* Handle an incoming Configure Request.  Option data may arrive split
 * across several PDUs (continuation flag set); fragments are buffered
 * in chan->conf_req until the final fragment arrives, then the whole
 * request is parsed and answered.  When both config directions are
 * done the channel is brought up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked; it is released at
	 * the unlock label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Config is only valid while the channel is being set up */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unparsable options: tear the connection down */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our outgoing Configure Request has not been accepted yet */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize FCS choice,
		 * initialize ERTM/streaming state if needed and mark
		 * the channel ready.
		 */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not sent our own Configure Request yet; do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3651
3652 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3653 {
3654 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3655 u16 scid, flags, result;
3656 struct l2cap_chan *chan;
3657 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3658 int err = 0;
3659
3660 scid = __le16_to_cpu(rsp->scid);
3661 flags = __le16_to_cpu(rsp->flags);
3662 result = __le16_to_cpu(rsp->result);
3663
3664 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3665 result, len);
3666
3667 chan = l2cap_get_chan_by_scid(conn, scid);
3668 if (!chan)
3669 return 0;
3670
3671 switch (result) {
3672 case L2CAP_CONF_SUCCESS:
3673 l2cap_conf_rfc_get(chan, rsp->data, len);
3674 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3675 break;
3676
3677 case L2CAP_CONF_PENDING:
3678 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3679
3680 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3681 char buf[64];
3682
3683 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3684 buf, &result);
3685 if (len < 0) {
3686 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3687 goto done;
3688 }
3689
3690 /* check compatibility */
3691
3692 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3693 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3694
3695 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3696 l2cap_build_conf_rsp(chan, buf,
3697 L2CAP_CONF_SUCCESS, 0x0000), buf);
3698 }
3699 goto done;
3700
3701 case L2CAP_CONF_UNACCEPT:
3702 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3703 char req[64];
3704
3705 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3706 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3707 goto done;
3708 }
3709
3710 /* throw out any old stored conf requests */
3711 result = L2CAP_CONF_SUCCESS;
3712 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3713 req, &result);
3714 if (len < 0) {
3715 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3716 goto done;
3717 }
3718
3719 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3720 L2CAP_CONF_REQ, len, req);
3721 chan->num_conf_req++;
3722 if (result != L2CAP_CONF_SUCCESS)
3723 goto done;
3724 break;
3725 }
3726
3727 default:
3728 l2cap_chan_set_err(chan, ECONNRESET);
3729
3730 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3731 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3732 goto done;
3733 }
3734
3735 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3736 goto done;
3737
3738 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3739
3740 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3741 set_default_fcs(chan);
3742
3743 if (chan->mode == L2CAP_MODE_ERTM ||
3744 chan->mode == L2CAP_MODE_STREAMING)
3745 err = l2cap_ertm_init(chan);
3746
3747 if (err < 0)
3748 l2cap_send_disconn_req(chan->conn, chan, -err);
3749 else
3750 l2cap_chan_ready(chan);
3751 }
3752
3753 done:
3754 l2cap_chan_unlock(chan);
3755 return err;
3756 }
3757
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * owning socket down and tear the channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Mirror the CID pair back in the response */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold an extra reference so the channel stays valid across
	 * the unlock until ops->close() has run, then drop it.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3803
/* Handle an incoming Disconnection Response: the peer has acknowledged
 * our Disconnection Request, so remove the channel (no error).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Extra reference keeps the channel alive across the unlock
	 * until ops->close() has run (same pattern as disconnect_req).
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3837
3838 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3839 {
3840 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3841 u16 type;
3842
3843 type = __le16_to_cpu(req->type);
3844
3845 BT_DBG("type 0x%4.4x", type);
3846
3847 if (type == L2CAP_IT_FEAT_MASK) {
3848 u8 buf[8];
3849 u32 feat_mask = l2cap_feat_mask;
3850 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3851 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3852 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3853 if (!disable_ertm)
3854 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3855 | L2CAP_FEAT_FCS;
3856 if (enable_hs)
3857 feat_mask |= L2CAP_FEAT_EXT_FLOW
3858 | L2CAP_FEAT_EXT_WINDOW;
3859
3860 put_unaligned_le32(feat_mask, rsp->data);
3861 l2cap_send_cmd(conn, cmd->ident,
3862 L2CAP_INFO_RSP, sizeof(buf), buf);
3863 } else if (type == L2CAP_IT_FIXED_CHAN) {
3864 u8 buf[12];
3865 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3866
3867 if (enable_hs)
3868 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3869 else
3870 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3871
3872 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3873 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3874 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3875 l2cap_send_cmd(conn, cmd->ident,
3876 L2CAP_INFO_RSP, sizeof(buf), buf);
3877 } else {
3878 struct l2cap_info_rsp rsp;
3879 rsp.type = cpu_to_le16(type);
3880 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3881 l2cap_send_cmd(conn, cmd->ident,
3882 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3883 }
3884
3885 return 0;
3886 }
3887
3888 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3889 {
3890 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3891 u16 type, result;
3892
3893 type = __le16_to_cpu(rsp->type);
3894 result = __le16_to_cpu(rsp->result);
3895
3896 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3897
3898 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3899 if (cmd->ident != conn->info_ident ||
3900 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3901 return 0;
3902
3903 cancel_delayed_work(&conn->info_timer);
3904
3905 if (result != L2CAP_IR_SUCCESS) {
3906 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3907 conn->info_ident = 0;
3908
3909 l2cap_conn_start(conn);
3910
3911 return 0;
3912 }
3913
3914 switch (type) {
3915 case L2CAP_IT_FEAT_MASK:
3916 conn->feat_mask = get_unaligned_le32(rsp->data);
3917
3918 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3919 struct l2cap_info_req req;
3920 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3921
3922 conn->info_ident = l2cap_get_ident(conn);
3923
3924 l2cap_send_cmd(conn, conn->info_ident,
3925 L2CAP_INFO_REQ, sizeof(req), &req);
3926 } else {
3927 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3928 conn->info_ident = 0;
3929
3930 l2cap_conn_start(conn);
3931 }
3932 break;
3933
3934 case L2CAP_IT_FIXED_CHAN:
3935 conn->fixed_chan_mask = rsp->data[0];
3936 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3937 conn->info_ident = 0;
3938
3939 l2cap_conn_start(conn);
3940 break;
3941 }
3942
3943 return 0;
3944 }
3945
3946 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3947 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3948 void *data)
3949 {
3950 struct l2cap_create_chan_req *req = data;
3951 struct l2cap_create_chan_rsp rsp;
3952 u16 psm, scid;
3953
3954 if (cmd_len != sizeof(*req))
3955 return -EPROTO;
3956
3957 if (!enable_hs)
3958 return -EINVAL;
3959
3960 psm = le16_to_cpu(req->psm);
3961 scid = le16_to_cpu(req->scid);
3962
3963 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3964
3965 /* Placeholder: Always reject */
3966 rsp.dcid = 0;
3967 rsp.scid = cpu_to_le16(scid);
3968 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3969 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3970
3971 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3972 sizeof(rsp), &rsp);
3973
3974 return 0;
3975 }
3976
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	int err;

	BT_DBG("conn %p", conn);

	/* A Create Channel Response carries the same payload as a
	 * Connect Response, so reuse that handler unchanged.
	 */
	err = l2cap_connect_rsp(conn, cmd, data);

	return err;
}
3984
3985 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3986 u16 icid, u16 result)
3987 {
3988 struct l2cap_move_chan_rsp rsp;
3989
3990 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3991
3992 rsp.icid = cpu_to_le16(icid);
3993 rsp.result = cpu_to_le16(result);
3994
3995 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3996 }
3997
3998 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3999 struct l2cap_chan *chan,
4000 u16 icid, u16 result)
4001 {
4002 struct l2cap_move_chan_cfm cfm;
4003 u8 ident;
4004
4005 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4006
4007 ident = l2cap_get_ident(conn);
4008 if (chan)
4009 chan->ident = ident;
4010
4011 cfm.icid = cpu_to_le16(icid);
4012 cfm.result = cpu_to_le16(result);
4013
4014 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4015 }
4016
4017 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4018 u16 icid)
4019 {
4020 struct l2cap_move_chan_cfm_rsp rsp;
4021
4022 BT_DBG("icid 0x%4.4x", icid);
4023
4024 rsp.icid = cpu_to_le16(icid);
4025 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4026 }
4027
4028 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4029 struct l2cap_cmd_hdr *cmd,
4030 u16 cmd_len, void *data)
4031 {
4032 struct l2cap_move_chan_req *req = data;
4033 u16 icid = 0;
4034 u16 result = L2CAP_MR_NOT_ALLOWED;
4035
4036 if (cmd_len != sizeof(*req))
4037 return -EPROTO;
4038
4039 icid = le16_to_cpu(req->icid);
4040
4041 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4042
4043 if (!enable_hs)
4044 return -EINVAL;
4045
4046 /* Placeholder: Always refuse */
4047 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4048
4049 return 0;
4050 }
4051
4052 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4053 struct l2cap_cmd_hdr *cmd,
4054 u16 cmd_len, void *data)
4055 {
4056 struct l2cap_move_chan_rsp *rsp = data;
4057 u16 icid, result;
4058
4059 if (cmd_len != sizeof(*rsp))
4060 return -EPROTO;
4061
4062 icid = le16_to_cpu(rsp->icid);
4063 result = le16_to_cpu(rsp->result);
4064
4065 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4066
4067 /* Placeholder: Always unconfirmed */
4068 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4069
4070 return 0;
4071 }
4072
4073 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4074 struct l2cap_cmd_hdr *cmd,
4075 u16 cmd_len, void *data)
4076 {
4077 struct l2cap_move_chan_cfm *cfm = data;
4078 u16 icid, result;
4079
4080 if (cmd_len != sizeof(*cfm))
4081 return -EPROTO;
4082
4083 icid = le16_to_cpu(cfm->icid);
4084 result = le16_to_cpu(cfm->result);
4085
4086 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4087
4088 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4089
4090 return 0;
4091 }
4092
4093 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4094 struct l2cap_cmd_hdr *cmd,
4095 u16 cmd_len, void *data)
4096 {
4097 struct l2cap_move_chan_cfm_rsp *rsp = data;
4098 u16 icid;
4099
4100 if (cmd_len != sizeof(*rsp))
4101 return -EPROTO;
4102
4103 icid = le16_to_cpu(rsp->icid);
4104
4105 BT_DBG("icid 0x%4.4x", icid);
4106
4107 return 0;
4108 }
4109
/* Validate LE connection parameters against the ranges allowed by the
 * Core Specification.  Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
						u16 to_multiplier)
{
	u16 max_latency;

	/* Connection interval: 6..3200 units (7.5 ms .. 4 s), min <= max */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout: 10..3200 units (100 ms .. 32 s) */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The supervision timeout must exceed the maximum interval */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	/* Slave latency must be at most 499 and small enough that
	 * (1 + latency) intervals still fit within the timeout.
	 */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
4130
/* Handle an LE Connection Parameter Update Request: validate the
 * requested values, answer with accept/reject and, on accept, ask the
 * controller to apply them.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master of the link may be asked for an update */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
				sizeof(rsp), &rsp);

	/* Only kick off the controller update for valid parameters */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
4172
/* Dispatch one BR/EDR signalling command to its handler.  A non-zero
 * return makes the caller (l2cap_sig_channel) send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		/* Nothing to do for an echo response */
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_create_channel_rsp(conn, cmd, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4254
4255 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4256 struct l2cap_cmd_hdr *cmd, u8 *data)
4257 {
4258 switch (cmd->code) {
4259 case L2CAP_COMMAND_REJ:
4260 return 0;
4261
4262 case L2CAP_CONN_PARAM_UPDATE_REQ:
4263 return l2cap_conn_param_update_req(conn, cmd, data);
4264
4265 case L2CAP_CONN_PARAM_UPDATE_RSP:
4266 return 0;
4267
4268 default:
4269 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4270 return -EINVAL;
4271 }
4272 }
4273
/* Process one skb received on the signalling channel.  A single skb
 * may carry several concatenated commands; each is dispatched to the
 * BR/EDR or LE handler, and any handler error results in a Command
 * Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* The claimed payload must fit in what is left, and a
		 * zero ident is invalid for any command.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
4320
/* Verify and strip the FCS of a received ERTM/streaming frame.
 * Returns 0 when the FCS matches (or FCS is not in use on this
 * channel), -EBADMSG on a checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the trailing FCS off the payload first; the two
		 * FCS bytes are still present in the linear buffer just
		 * past the new skb->len, so they can be read back here.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The CRC also covers the already-pulled L2CAP header
		 * that sits immediately before skb->data.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4341
/* Answer a poll (P-bit) from the remote: acknowledge up to buffer_seq
 * with the F-bit set, using an RNR when locally busy, otherwise
 * pending I-frames, or an RR as a last resort.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* When locally busy, the poll must be answered with RNR */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just left busy state: restart the retransmission
	 * timer if frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
			chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4375
/* Append new_frag to skb's fragment list and update the accounting.
 * *last_frag tracks the current tail of the list (initially skb
 * itself) so appends stay O(1); it is advanced to new_frag.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* Thread the new fragment onto the tail and advance the tail */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4394
/* Reassemble a segmented SDU from an incoming I-frame according to its
 * SAR bits.  On the paths that keep the skb (start/continue of a
 * segmented SDU) ownership transfers to chan->sdu and the local skb
 * pointer is NULLed; on any error both the frame and any partial SDU
 * are freed.  Returns 0 on success (including "SDU still incomplete"),
 * or a negative error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU while one is being reassembled is a
		 * protocol error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must be smaller than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* A continuation without a start is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
						&chan->sdu_last_frag);
		skb = NULL;

		/* Still expecting more data than announced? */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
						&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match the length announced in the start
		 * segment exactly.
		 */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4476
4477 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4478 {
4479 u8 event;
4480
4481 if (chan->mode != L2CAP_MODE_ERTM)
4482 return;
4483
4484 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4485 l2cap_tx(chan, NULL, NULL, event);
4486 }
4487
/* Drain in-sequence frames from the SREJ queue after a gap has been
 * filled, passing each to SDU reassembly.  Stops at the first missing
 * sequence number or when local busy is entered; once the queue is
 * fully drained, return to the normal receive state and ack.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
				chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next expected frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All out-of-order frames delivered; back to normal */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4521
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, enforcing the retry limit, and track SREJ state so a later
 * F-bit response does not trigger a duplicate retransmission.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
				struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next unsent sequence number is bogus */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
			control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll demands an F-bit response along with the
		 * retransmission.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers an
			 * SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
					!test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4579
/* Handle a received REJ S-frame: retransmit all unacknowledged
 * I-frames starting at reqseq, enforcing the retry limit, and track
 * REJ state so an F-bit response does not retransmit twice.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
				struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the next unsent sequence number is bogus */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx && skb &&
			bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit if this F-bit answers a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4616
/* Classify the txseq of a received I-frame relative to the receive
 * window (expected / duplicate / unexpected gap / invalid), taking the
 * SREJ-sent state into account.  Returns one of the L2CAP_TXSEQ_*
 * classification codes that drive the receive state machine.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
		chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
				chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		/* Frame already buffered while waiting for the gap */
		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq falls before the expected seq => already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4703
/* Receive-state-machine handler for the ERTM RECV (normal) state.
 *
 * Processes one incoming event (I-frame or S-frame) while no SREJ
 * recovery is in progress.  @skb may be NULL for S-frame events; unless
 * ownership of the skb is transferred (reassembly or srej_q queueing),
 * it is freed before returning.  Returns 0 on success or a negative
 * error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = 0;	/* set once ownership of skb is transferred */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy, in-sequence frames are simply
			 * dropped; the peer will retransmit them once local
			 * busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers a poll; if it does not match a
				 * pending REJ, clear it and retransmit.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy: restart the retransmission
			 * timer if frames are still awaiting an ack.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote is busy: stop retransmitting until it recovers. */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4838
/* Receive-state-machine handler for the ERTM SREJ_SENT state.
 *
 * Entered after one or more SREJ frames were sent to request
 * retransmission of missing I-frames.  Incoming I-frames are buffered
 * on srej_q until the sequence gap is filled; @skb is freed unless it
 * was queued.  Returns 0 or a negative error from reassembly of the
 * queued frames.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;	/* set once skb is queued on srej_q */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* A requested retransmission arrived; remove it from
			 * the SREJ list and try to flush the queued frames
			 * up the stack.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan->conn, chan,
					       ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll and re-request the SREJ tail. */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR S-frame. */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
4982
4983 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4984 {
4985 /* Make sure reqseq is for a packet that has been sent but not acked */
4986 u16 unacked;
4987
4988 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4989 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4990 }
4991
4992 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4993 struct sk_buff *skb, u8 event)
4994 {
4995 int err = 0;
4996
4997 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4998 control, skb, event, chan->rx_state);
4999
5000 if (__valid_reqseq(chan, control->reqseq)) {
5001 switch (chan->rx_state) {
5002 case L2CAP_RX_STATE_RECV:
5003 err = l2cap_rx_state_recv(chan, control, skb, event);
5004 break;
5005 case L2CAP_RX_STATE_SREJ_SENT:
5006 err = l2cap_rx_state_srej_sent(chan, control, skb,
5007 event);
5008 break;
5009 default:
5010 /* shut it down */
5011 break;
5012 }
5013 } else {
5014 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5015 control->reqseq, chan->next_tx_seq,
5016 chan->expected_ack_seq);
5017 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5018 }
5019
5020 return err;
5021 }
5022
5023 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5024 struct sk_buff *skb)
5025 {
5026 int err = 0;
5027
5028 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5029 chan->rx_state);
5030
5031 if (l2cap_classify_txseq(chan, control->txseq) ==
5032 L2CAP_TXSEQ_EXPECTED) {
5033 l2cap_pass_to_tx(chan, control);
5034
5035 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5036 __next_seq(chan, chan->buffer_seq));
5037
5038 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5039
5040 l2cap_reassemble_sdu(chan, skb, control);
5041 } else {
5042 if (chan->sdu) {
5043 kfree_skb(chan->sdu);
5044 chan->sdu = NULL;
5045 }
5046 chan->sdu_last_frag = NULL;
5047 chan->sdu_len = 0;
5048
5049 if (skb) {
5050 BT_DBG("Freeing %p", skb);
5051 kfree_skb(skb);
5052 }
5053 }
5054
5055 chan->last_acked_seq = control->txseq;
5056 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5057
5058 return err;
5059 }
5060
5061 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5062 {
5063 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5064 u16 len;
5065 u8 event;
5066
5067 __unpack_control(chan, skb);
5068
5069 len = skb->len;
5070
5071 /*
5072 * We can just drop the corrupted I-frame here.
5073 * Receiver will miss it and start proper recovery
5074 * procedures and ask for retransmission.
5075 */
5076 if (l2cap_check_fcs(chan, skb))
5077 goto drop;
5078
5079 if (!control->sframe && control->sar == L2CAP_SAR_START)
5080 len -= L2CAP_SDULEN_SIZE;
5081
5082 if (chan->fcs == L2CAP_FCS_CRC16)
5083 len -= L2CAP_FCS_SIZE;
5084
5085 if (len > chan->mps) {
5086 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5087 goto drop;
5088 }
5089
5090 if (!control->sframe) {
5091 int err;
5092
5093 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5094 control->sar, control->reqseq, control->final,
5095 control->txseq);
5096
5097 /* Validate F-bit - F=0 always valid, F=1 only
5098 * valid in TX WAIT_F
5099 */
5100 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5101 goto drop;
5102
5103 if (chan->mode != L2CAP_MODE_STREAMING) {
5104 event = L2CAP_EV_RECV_IFRAME;
5105 err = l2cap_rx(chan, control, skb, event);
5106 } else {
5107 err = l2cap_stream_rx(chan, control, skb);
5108 }
5109
5110 if (err)
5111 l2cap_send_disconn_req(chan->conn, chan,
5112 ECONNRESET);
5113 } else {
5114 const u8 rx_func_to_event[4] = {
5115 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5116 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5117 };
5118
5119 /* Only I-frames are expected in streaming mode */
5120 if (chan->mode == L2CAP_MODE_STREAMING)
5121 goto drop;
5122
5123 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5124 control->reqseq, control->final, control->poll,
5125 control->super);
5126
5127 if (len != 0) {
5128 BT_ERR("%d", len);
5129 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5130 goto drop;
5131 }
5132
5133 /* Validate F and P bits */
5134 if (control->final && (control->poll ||
5135 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5136 goto drop;
5137
5138 event = rx_func_to_event[control->super];
5139 if (l2cap_rx(chan, control, skb, event))
5140 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5141 }
5142
5143 return 0;
5144
5145 drop:
5146 kfree_skb(skb);
5147 return 0;
5148 }
5149
/* Deliver a data packet to the channel identified by @cid.
 *
 * An unknown CID drops the packet, except L2CAP_CID_A2MP, for which an
 * A2MP channel is created on demand.  The skb is consumed on all paths
 * (either passed to the channel or freed).
 *
 * NOTE(review): the function unlocks the channel at the end and
 * explicitly locks a freshly created A2MP channel, which suggests
 * l2cap_get_chan_by_scid returns the channel locked -- confirm against
 * its definition, which is outside this chunk.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Match the locked state of channels found via
			 * l2cap_get_chan_by_scid.
			 */
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* ops->recv returns 0 when it consumed the skb. */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv consumes the skb on every path. */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5208
5209 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5210 struct sk_buff *skb)
5211 {
5212 struct l2cap_chan *chan;
5213
5214 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5215 if (!chan)
5216 goto drop;
5217
5218 BT_DBG("chan %p, len %d", chan, skb->len);
5219
5220 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5221 goto drop;
5222
5223 if (chan->imtu < skb->len)
5224 goto drop;
5225
5226 if (!chan->ops->recv(chan, skb))
5227 return;
5228
5229 drop:
5230 kfree_skb(skb);
5231 }
5232
5233 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5234 struct sk_buff *skb)
5235 {
5236 struct l2cap_chan *chan;
5237
5238 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5239 if (!chan)
5240 goto drop;
5241
5242 BT_DBG("chan %p, len %d", chan, skb->len);
5243
5244 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5245 goto drop;
5246
5247 if (chan->imtu < skb->len)
5248 goto drop;
5249
5250 if (!chan->ops->recv(chan, skb))
5251 return;
5252
5253 drop:
5254 kfree_skb(skb);
5255 }
5256
5257 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5258 {
5259 struct l2cap_hdr *lh = (void *) skb->data;
5260 u16 cid, len;
5261 __le16 psm;
5262
5263 skb_pull(skb, L2CAP_HDR_SIZE);
5264 cid = __le16_to_cpu(lh->cid);
5265 len = __le16_to_cpu(lh->len);
5266
5267 if (len != skb->len) {
5268 kfree_skb(skb);
5269 return;
5270 }
5271
5272 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5273
5274 switch (cid) {
5275 case L2CAP_CID_LE_SIGNALING:
5276 case L2CAP_CID_SIGNALING:
5277 l2cap_sig_channel(conn, skb);
5278 break;
5279
5280 case L2CAP_CID_CONN_LESS:
5281 psm = get_unaligned((__le16 *) skb->data);
5282 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5283 l2cap_conless_channel(conn, psm, skb);
5284 break;
5285
5286 case L2CAP_CID_LE_DATA:
5287 l2cap_att_channel(conn, cid, skb);
5288 break;
5289
5290 case L2CAP_CID_SMP:
5291 if (smp_sig_channel(conn, skb))
5292 l2cap_conn_del(conn->hcon, EACCES);
5293 break;
5294
5295 default:
5296 l2cap_data_channel(conn, cid, skb);
5297 break;
5298 }
5299 }
5300
5301 /* ---- L2CAP interface with lower layer (HCI) ---- */
5302
5303 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5304 {
5305 int exact = 0, lm1 = 0, lm2 = 0;
5306 struct l2cap_chan *c;
5307
5308 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5309
5310 /* Find listening sockets and check their link_mode */
5311 read_lock(&chan_list_lock);
5312 list_for_each_entry(c, &chan_list, global_l) {
5313 struct sock *sk = c->sk;
5314
5315 if (c->state != BT_LISTEN)
5316 continue;
5317
5318 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5319 lm1 |= HCI_LM_ACCEPT;
5320 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5321 lm1 |= HCI_LM_MASTER;
5322 exact++;
5323 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5324 lm2 |= HCI_LM_ACCEPT;
5325 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5326 lm2 |= HCI_LM_MASTER;
5327 }
5328 }
5329 read_unlock(&chan_list_lock);
5330
5331 return exact ? lm1 : lm2;
5332 }
5333
5334 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5335 {
5336 struct l2cap_conn *conn;
5337
5338 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5339
5340 if (!status) {
5341 conn = l2cap_conn_add(hcon, status);
5342 if (conn)
5343 l2cap_conn_ready(conn);
5344 } else
5345 l2cap_conn_del(hcon, bt_to_errno(status));
5346
5347 return 0;
5348 }
5349
5350 int l2cap_disconn_ind(struct hci_conn *hcon)
5351 {
5352 struct l2cap_conn *conn = hcon->l2cap_data;
5353
5354 BT_DBG("hcon %p", hcon);
5355
5356 if (!conn)
5357 return HCI_ERROR_REMOTE_USER_TERM;
5358 return conn->disc_reason;
5359 }
5360
5361 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5362 {
5363 BT_DBG("hcon %p reason %d", hcon, reason);
5364
5365 l2cap_conn_del(hcon, bt_to_errno(reason));
5366 return 0;
5367 }
5368
5369 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5370 {
5371 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5372 return;
5373
5374 if (encrypt == 0x00) {
5375 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5376 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5377 } else if (chan->sec_level == BT_SECURITY_HIGH)
5378 l2cap_chan_close(chan, ECONNREFUSED);
5379 } else {
5380 if (chan->sec_level == BT_SECURITY_MEDIUM)
5381 __clear_chan_timer(chan);
5382 }
5383 }
5384
/* HCI callback: authentication/encryption state changed on @hcon.
 *
 * Walks every channel on the connection and advances its state machine:
 * LE data channels become ready, BR/EDR channels in BT_CONNECT send the
 * deferred connect request, and channels in BT_CONNECT2 answer the
 * peer's pending connect request with success or a security block.
 * Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE channels become usable once the link encrypts. */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channel still waiting on a connect response; nothing to
		 * do here yet.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			/* Wake writers that were suspended pending security. */
			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed for our outgoing connect. */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was held for security; send the
			 * deferred connect response now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Still pending userspace authorization. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration immediately on success. */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5497
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 *
 * A start fragment (no ACL_CONT flag) carries the basic L2CAP header,
 * whose length field tells how many payload bytes to expect; further
 * continuation fragments are accumulated into conn->rx_skb until
 * conn->rx_len reaches zero, then the complete frame is dispatched via
 * l2cap_recv_frame.  Any framing violation marks the connection
 * unreliable and drops the fragment.  The incoming skb is always
 * consumed.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was never completed: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5589
5590 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5591 {
5592 struct l2cap_chan *c;
5593
5594 read_lock(&chan_list_lock);
5595
5596 list_for_each_entry(c, &chan_list, global_l) {
5597 struct sock *sk = c->sk;
5598
5599 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5600 batostr(&bt_sk(sk)->src),
5601 batostr(&bt_sk(sk)->dst),
5602 c->state, __le16_to_cpu(c->psm),
5603 c->scid, c->dcid, c->imtu, c->omtu,
5604 c->sec_level, c->mode);
5605 }
5606
5607 read_unlock(&chan_list_lock);
5608
5609 return 0;
5610 }
5611
/* debugfs open callback: hook the seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

/* File operations for /sys/kernel/debug/bluetooth/l2cap. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5625
5626 int __init l2cap_init(void)
5627 {
5628 int err;
5629
5630 err = l2cap_init_sockets();
5631 if (err < 0)
5632 return err;
5633
5634 if (bt_debugfs) {
5635 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5636 bt_debugfs, NULL, &l2cap_debugfs_fops);
5637 if (!l2cap_debugfs)
5638 BT_ERR("Failed to create L2CAP debug file");
5639 }
5640
5641 return 0;
5642 }
5643
/* Module exit: remove the debugfs file and unregister the socket layer.
 * debugfs_remove() tolerates a NULL dentry, so this is safe even if
 * the file was never created.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}

/* Allow disabling ERTM at load time or via sysfs (0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");