]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/bluetooth/l2cap_core.c
Merge tag 'please-pull-mce' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras
[mirror_ubuntu-zesty-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
/* Module parameter: when set, Enhanced Retransmission and Streaming modes
 * are not advertised in the local feature mask (see l2cap_mode_supported()).
 */
bool disable_ertm;

/* Features advertised to the remote side in Information Responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap advertised to the remote side. */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all L2CAP channels, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for helpers defined later in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87 }
88
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 struct l2cap_chan *c;
105
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 if (c)
109 l2cap_chan_lock(c);
110 mutex_unlock(&conn->chan_lock);
111
112 return c;
113 }
114
115 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
116 {
117 struct l2cap_chan *c;
118
119 list_for_each_entry(c, &conn->chan_l, list) {
120 if (c->ident == ident)
121 return c;
122 }
123 return NULL;
124 }
125
126 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
127 {
128 struct l2cap_chan *c;
129
130 list_for_each_entry(c, &chan_list, global_l) {
131 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
132 return c;
133 }
134 return NULL;
135 }
136
137 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
138 {
139 int err;
140
141 write_lock(&chan_list_lock);
142
143 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
144 err = -EADDRINUSE;
145 goto done;
146 }
147
148 if (psm) {
149 chan->psm = psm;
150 chan->sport = psm;
151 err = 0;
152 } else {
153 u16 p;
154
155 err = -EINVAL;
156 for (p = 0x1001; p < 0x1100; p += 2)
157 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
158 chan->psm = cpu_to_le16(p);
159 chan->sport = cpu_to_le16(p);
160 err = 0;
161 break;
162 }
163 }
164
165 done:
166 write_unlock(&chan_list_lock);
167 return err;
168 }
169
170 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
171 {
172 write_lock(&chan_list_lock);
173
174 chan->scid = scid;
175
176 write_unlock(&chan_list_lock);
177
178 return 0;
179 }
180
181 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
182 {
183 u16 cid = L2CAP_CID_DYN_START;
184
185 for (; cid < L2CAP_CID_DYN_END; cid++) {
186 if (!__l2cap_get_chan_by_scid(conn, cid))
187 return cid;
188 }
189
190 return 0;
191 }
192
/* Set the channel state and notify the channel owner via the
 * state_change callback.  Caller must hold the socket lock (see
 * l2cap_state_change()).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
201
/* Locked wrapper around __l2cap_state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
210
/* Record @err on the channel's socket.  Caller must hold the socket
 * lock (see l2cap_chan_set_err()).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
217
/* Locked wrapper around __l2cap_chan_set_err(). */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
226
227 /* ---- L2CAP sequence number lists ---- */
228
229 /* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
235 * allocs or frees.
236 */
237
238 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
239 {
240 size_t alloc_size, i;
241
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) in to a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
245 */
246 alloc_size = roundup_pow_of_two(size);
247
248 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
249 if (!seq_list->list)
250 return -ENOMEM;
251
252 seq_list->mask = alloc_size - 1;
253 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
254 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
255 for (i = 0; i < alloc_size; i++)
256 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
257
258 return 0;
259 }
260
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
265
/* True when @seq is currently on the list: a slot holding anything
 * other than L2CAP_SEQ_LIST_CLEAR marks membership.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
								u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
272
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty or @seq is not a member.  Removing the head is O(1);
 * removing an interior entry walks the singly-linked chain.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* Removed the only element: list is now empty */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
306
/* Remove and return the list head; L2CAP_SEQ_LIST_CLEAR when empty
 * (see l2cap_seq_list_remove()).
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
312
/* Empty the list, resetting every slot and the head/tail markers. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* Nothing to do for an already-empty list */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
326
/* Append @seq at the tail of the list; no-op if it is already a member. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: the new entry becomes the head as well */
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
344
/* Delayed-work handler for a channel's chan_timer: the channel did not
 * complete the expected state transition in time, so close it with a
 * state-dependent error.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
374
375 struct l2cap_chan *l2cap_chan_create(void)
376 {
377 struct l2cap_chan *chan;
378
379 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
380 if (!chan)
381 return NULL;
382
383 mutex_init(&chan->lock);
384
385 write_lock(&chan_list_lock);
386 list_add(&chan->global_l, &chan_list);
387 write_unlock(&chan_list_lock);
388
389 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
390
391 chan->state = BT_OPEN;
392
393 atomic_set(&chan->refcnt, 1);
394
395 BT_DBG("chan %p", chan);
396
397 return chan;
398 }
399
/* Remove @chan from the global channel list and drop the creation
 * reference taken in l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
408
409 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
410 {
411 chan->fcs = L2CAP_FCS_CRC16;
412 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
413 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
414 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
415 chan->sec_level = BT_SECURITY_LOW;
416
417 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
418 }
419
420 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
421 {
422 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
423 __le16_to_cpu(chan->psm), chan->dcid);
424
425 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
426
427 chan->conn = conn;
428
429 switch (chan->chan_type) {
430 case L2CAP_CHAN_CONN_ORIENTED:
431 if (conn->hcon->type == LE_LINK) {
432 /* LE connection */
433 chan->omtu = L2CAP_LE_DEFAULT_MTU;
434 chan->scid = L2CAP_CID_LE_DATA;
435 chan->dcid = L2CAP_CID_LE_DATA;
436 } else {
437 /* Alloc CID for connection-oriented socket */
438 chan->scid = l2cap_alloc_cid(conn);
439 chan->omtu = L2CAP_DEFAULT_MTU;
440 }
441 break;
442
443 case L2CAP_CHAN_CONN_LESS:
444 /* Connectionless socket */
445 chan->scid = L2CAP_CID_CONN_LESS;
446 chan->dcid = L2CAP_CID_CONN_LESS;
447 chan->omtu = L2CAP_DEFAULT_MTU;
448 break;
449
450 default:
451 /* Raw socket can send/recv signalling messages only */
452 chan->scid = L2CAP_CID_SIGNALING;
453 chan->dcid = L2CAP_CID_SIGNALING;
454 chan->omtu = L2CAP_DEFAULT_MTU;
455 }
456
457 chan->local_id = L2CAP_BESTEFFORT_ID;
458 chan->local_stype = L2CAP_SERV_BESTEFFORT;
459 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
460 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
461 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
462 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
463
464 l2cap_chan_hold(chan);
465
466 list_add(&chan->list, &conn->chan_l);
467 }
468
/* Attach @chan to @conn under the connection's channel-list lock. */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
475
/* Detach @chan from its connection and mark it closed.  A non-zero @err
 * is reported on the socket as sk_err.  Once both configuration
 * directions had completed, also tears down all ERTM state.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Not yet accepted: unlink from the listener and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* The teardown below only applies once configuration completed in
	 * both directions.
	 */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
535
/* Close every channel still sitting on @parent's accept queue. */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
	}
}
554
/* Close @chan with @reason, driving whatever protocol action the current
 * state requires (disconnect request, connect response, plain deletion).
 * Callers hold the channel lock (see l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Ask the peer to disconnect; the timer bounds how
			 * long we wait for its response. */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		/* Nothing on the wire yet: just zap the socket */
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
618
619 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
620 {
621 if (chan->chan_type == L2CAP_CHAN_RAW) {
622 switch (chan->sec_level) {
623 case BT_SECURITY_HIGH:
624 return HCI_AT_DEDICATED_BONDING_MITM;
625 case BT_SECURITY_MEDIUM:
626 return HCI_AT_DEDICATED_BONDING;
627 default:
628 return HCI_AT_NO_BONDING;
629 }
630 } else if (chan->psm == cpu_to_le16(0x0001)) {
631 if (chan->sec_level == BT_SECURITY_LOW)
632 chan->sec_level = BT_SECURITY_SDP;
633
634 if (chan->sec_level == BT_SECURITY_HIGH)
635 return HCI_AT_NO_BONDING_MITM;
636 else
637 return HCI_AT_NO_BONDING;
638 } else {
639 switch (chan->sec_level) {
640 case BT_SECURITY_HIGH:
641 return HCI_AT_GENERAL_BONDING_MITM;
642 case BT_SECURITY_MEDIUM:
643 return HCI_AT_GENERAL_BONDING;
644 default:
645 return HCI_AT_NO_BONDING;
646 }
647 }
648 }
649
/* Service level security */
/* Ask the HCI layer to enforce @chan's security level with the matching
 * authentication requirement.  Callers treat a non-zero return as
 * "security already satisfied" (see l2cap_conn_start()).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
660
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved 1..128 range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
682
/* Build and transmit a signalling command on @conn.
 * Silently does nothing when the command skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic goes out at top priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
703
/* Hand a fully built frame for @chan to the HCI layer, choosing the
 * flushable vs non-flushable ACL start flag from the channel flags and
 * controller capabilities.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
721
722 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
723 {
724 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
725 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
726
727 if (enh & L2CAP_CTRL_FRAME_TYPE) {
728 /* S-Frame */
729 control->sframe = 1;
730 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
731 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
732
733 control->sar = 0;
734 control->txseq = 0;
735 } else {
736 /* I-Frame */
737 control->sframe = 0;
738 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
739 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
740
741 control->poll = 0;
742 control->super = 0;
743 }
744 }
745
746 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
747 {
748 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
749 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
750
751 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
752 /* S-Frame */
753 control->sframe = 1;
754 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
755 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
756
757 control->sar = 0;
758 control->txseq = 0;
759 } else {
760 /* I-Frame */
761 control->sframe = 0;
762 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
763 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
764
765 control->poll = 0;
766 control->super = 0;
767 }
768 }
769
/* Decode the control field at skb->data into bt_cb(skb)->control, using
 * the 32-bit extended layout when the channel negotiated it and the
 * 16-bit enhanced layout otherwise.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
						struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
						&bt_cb(skb)->control);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
						&bt_cb(skb)->control);
	}
}
781
782 static u32 __pack_extended_control(struct l2cap_ctrl *control)
783 {
784 u32 packed;
785
786 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
787 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
788
789 if (control->sframe) {
790 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
791 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
792 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
793 } else {
794 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
795 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
796 }
797
798 return packed;
799 }
800
801 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
802 {
803 u16 packed;
804
805 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
807
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_CTRL_FRAME_TYPE;
812 } else {
813 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
815 }
816
817 return packed;
818 }
819
/* Write @control into the frame just past the basic L2CAP header, using
 * the 32-bit extended layout when negotiated, else the 16-bit one.
 */
static inline void __pack_control(struct l2cap_chan *chan,
						struct l2cap_ctrl *control,
						struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
						skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
						skb->data + L2CAP_HDR_SIZE);
	}
}
832
/* Build and transmit an S-frame carrying @control on @chan.  Folds in
 * pending F/P bits (CONN_SEND_FBIT / CONN_SEND_PBIT) and appends an FCS
 * when CRC16 is in use.  No-op unless the channel is BT_CONNECTED.
 */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen;

	if (chan->state != BT_CONNECTED)
		return;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	BT_DBG("chan %p, control 0x%8.8x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);

	control |= __set_sframe(chan);

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= __set_ctrl_poll(chan);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything preceding it: header + control */
		u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	l2cap_do_send(chan, skb);
}
881
/* Send an RNR S-frame when we are locally busy, otherwise an RR, in
 * either case acknowledging up to chan->buffer_seq.
 */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
{
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	} else
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);

	control |= __set_reqseq(chan, chan->buffer_seq);

	l2cap_send_sframe(chan, control);
}
894
/* True when no Connect Request is outstanding for @chan. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
899
/* Send an L2CAP Connect Request for @chan and mark the request as
 * pending (cleared when the response is processed).
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched back */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
914
/* Move @chan to BT_CONNECTED and wake the socket (and its listening
 * parent, if any).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* Configuration is done; stop the setup timer */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
937
/* Start connection establishment for @chan.  LE channels become ready
 * immediately; on BR/EDR a Connect Request is sent once the feature-mask
 * exchange has finished (kicking that exchange off first if necessary).
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in progress: wait for it to complete */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the exchange; see l2cap_info_timeout() */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
967
968 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
969 {
970 u32 local_feat_mask = l2cap_feat_mask;
971 if (!disable_ertm)
972 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
973
974 switch (mode) {
975 case L2CAP_MODE_ERTM:
976 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
977 case L2CAP_MODE_STREAMING:
978 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
979 default:
980 return 0x00;
981 }
982 }
983
/* Send a Disconnect Request for @chan, stop any running ERTM timers and
 * move the channel to BT_DISCONN, reporting @err on the socket.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1008
1009 /* ---- L2CAP connections ---- */
/* Push every channel on @conn forward once the connection (or its
 * information exchange) becomes usable: emit pending Connect Requests
 * and answer pending incoming connections.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe: l2cap_chan_close() below can unlink the current entry */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Give up when the required mode is unsupported and
			 * the channel must not fall back to another mode. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Wait for userspace authorisation */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Only start configuration on a fresh success */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1092
/* Find socket with cid and source/destination bdaddr.
 * Returns closest match.  NOTE(review): despite the historical "locked"
 * wording, the returned channel is NOT locked here — chan_list_lock is
 * released before returning; confirm callers handle this.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						bdaddr_t *src,
						bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* @state == 0 matches any state */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
					(src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1135
/* Handle an incoming LE connection: look for a channel listening on the
 * LE data CID and spawn a child channel on @conn for it.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	__l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	/* Success falls through here too: only the parent lock to drop */
clean:
	release_sock(parent);
}
1182
/* Called when the underlying HCI link is fully up: kick off security on
 * LE links and move each existing channel forward (connectionless and
 * raw channels become connected directly; connection-oriented channels
 * in BT_CONNECT start the L2CAP connect/configure sequence).
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: maybe hand a child socket to a listener. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start pairing at the pending level. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No config handshake needed for these types. */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1221
1222 /* Notify sockets that we cannot guaranty reliability anymore */
/* Notify sockets that we cannot guaranty reliability anymore.
 * Only channels that explicitly asked for reliable delivery
 * (FLAG_FORCE_RELIABLE) get @err raised on them.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1238
/* Delayed-work handler: the peer never answered our information request,
 * so treat feature discovery as complete and proceed with pending
 * connection setup anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1249
/* Tear down an L2CAP connection: kill every channel with @err, then free
 * all per-connection resources.  Safe to call with a NULL l2cap_data.
 *
 * Each channel is held across its deletion so ops->close() (which runs
 * outside the channel lock) still has a valid object.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled ACL frame. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Cancel whichever delayed work this link type armed. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1292
/* Delayed-work handler: SMP security procedure timed out on an LE link;
 * drop the whole connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1300
/* Create (or return the existing) L2CAP connection object for an HCI
 * link.  Returns NULL on allocation failure, or the existing conn when
 * one is already attached.  A non-zero @status also returns the current
 * conn (normally NULL) without creating anything.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	/* GFP_ATOMIC: may be called from non-sleepable HCI callbacks. */
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may have their own, smaller MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* security_timer and info_timer share storage; only the one
	 * matching the link type is ever used.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1349
1350 /* ---- Socket interface ---- */
1351
/* Find socket with psm and source / destination bdaddr.
 * Returns an exact src+dst match if present, otherwise the closest
 * match where one or both socket addresses are BDADDR_ANY, or NULL.
 * Mirrors l2cap_global_chan_by_scid() but keys on the PSM.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 means "don't care" */
		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1394
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * using either @psm (connection-oriented) or a fixed @cid.
 *
 * Creates/reuses the ACL or LE link, attaches the channel to its conn
 * and starts the connect state machine.  Returns 0 on success or if a
 * connect is already in progress, negative errno otherwise.
 *
 * Lock ordering here is hdev lock -> chan lock -> sock lock; the chan
 * lock is dropped around l2cap_chan_add() which takes conn->chan_lock.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM or a fixed CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channel CID selects an LE link, everything else ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel is allowed on an LE link. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, which must be taken
	 * before the channel lock, so drop and re-take the chan lock.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1538
/* Block (interruptibly) until all outstanding ERTM frames on the
 * socket's channel have been acked or the connection goes away.
 * Called with the socket locked; the lock is dropped while sleeping.
 * Returns 0 on success, -EINTR/-ERESTARTSYS on signal, or the socket
 * error if one is raised while waiting.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval after a full timeout. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* State must be reset before re-testing the condition. */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1570
/* ERTM monitor timer: the peer stopped responding to our poll frames.
 * Give up and disconnect after remote_max_tx retries, otherwise poll
 * again.  Drops the reference the timer held on the channel.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1594
/* ERTM retransmission timer: no ack arrived in time.  Switch to the
 * monitor procedure: poll the peer with an RR/RNR carrying the P bit
 * and wait for an F-bit response.  Drops the timer's channel reference.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	chan->retry_count = 1;
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1614
/* Free acked frames from the head of the transmit queue, stopping at
 * the frame with sequence number expected_ack_seq (the first unacked
 * one).  Clears the retransmission timer once nothing is outstanding.
 */
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&chan->tx_q)) &&
	       chan->unacked_frames) {
		if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
			break;

		skb = skb_dequeue(&chan->tx_q);
		kfree_skb(skb);

		chan->unacked_frames--;
	}

	if (!chan->unacked_frames)
		__clear_retrans_timer(chan);
}
1633
/* Transmit every queued PDU in streaming mode: stamp each frame's
 * control field with the next tx sequence number and SAR bits, append
 * the FCS if configured, and hand it to the HCI layer.  Streaming mode
 * keeps no copy for retransmission.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u32 control;
	u16 fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
		control |= __set_txseq(chan, chan->next_tx_seq);
		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except the FCS field
			 * itself, which was reserved at the tail.
			 */
			fcs = crc16(0, (u8 *)skb->data,
				    skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs,
					   skb->data + skb->len - L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
	}
}
1658
1659 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1660 {
1661 struct sk_buff *skb, *tx_skb;
1662 u16 fcs;
1663 u32 control;
1664
1665 skb = skb_peek(&chan->tx_q);
1666 if (!skb)
1667 return;
1668
1669 while (bt_cb(skb)->control.txseq != tx_seq) {
1670 if (skb_queue_is_last(&chan->tx_q, skb))
1671 return;
1672
1673 skb = skb_queue_next(&chan->tx_q, skb);
1674 }
1675
1676 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1677 chan->remote_max_tx) {
1678 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1679 return;
1680 }
1681
1682 tx_skb = skb_clone(skb, GFP_ATOMIC);
1683 bt_cb(skb)->control.retries++;
1684
1685 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1686 control &= __get_sar_mask(chan);
1687
1688 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1689 control |= __set_ctrl_final(chan);
1690
1691 control |= __set_reqseq(chan, chan->buffer_seq);
1692 control |= __set_txseq(chan, tx_seq);
1693
1694 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1695
1696 if (chan->fcs == L2CAP_FCS_CRC16) {
1697 fcs = crc16(0, (u8 *)tx_skb->data,
1698 tx_skb->len - L2CAP_FCS_SIZE);
1699 put_unaligned_le16(fcs,
1700 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1701 }
1702
1703 l2cap_do_send(chan, tx_skb);
1704 }
1705
1706 static int l2cap_ertm_send(struct l2cap_chan *chan)
1707 {
1708 struct sk_buff *skb, *tx_skb;
1709 u16 fcs;
1710 u32 control;
1711 int nsent = 0;
1712
1713 if (chan->state != BT_CONNECTED)
1714 return -ENOTCONN;
1715
1716 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1717 return 0;
1718
1719 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1720
1721 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1722 chan->remote_max_tx) {
1723 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1724 break;
1725 }
1726
1727 tx_skb = skb_clone(skb, GFP_ATOMIC);
1728
1729 bt_cb(skb)->control.retries++;
1730
1731 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1732 control &= __get_sar_mask(chan);
1733
1734 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1735 control |= __set_ctrl_final(chan);
1736
1737 control |= __set_reqseq(chan, chan->buffer_seq);
1738 control |= __set_txseq(chan, chan->next_tx_seq);
1739 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1740
1741 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1742
1743 if (chan->fcs == L2CAP_FCS_CRC16) {
1744 fcs = crc16(0, (u8 *)skb->data,
1745 tx_skb->len - L2CAP_FCS_SIZE);
1746 put_unaligned_le16(fcs, skb->data +
1747 tx_skb->len - L2CAP_FCS_SIZE);
1748 }
1749
1750 l2cap_do_send(chan, tx_skb);
1751
1752 __set_retrans_timer(chan);
1753
1754 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1755
1756 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1757
1758 if (bt_cb(skb)->control.retries == 1) {
1759 chan->unacked_frames++;
1760
1761 if (!nsent++)
1762 __clear_ack_timer(chan);
1763 }
1764
1765 chan->frames_sent++;
1766
1767 if (skb_queue_is_last(&chan->tx_q, skb))
1768 chan->tx_send_head = NULL;
1769 else
1770 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1771 }
1772
1773 return nsent;
1774 }
1775
1776 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1777 {
1778 int ret;
1779
1780 if (!skb_queue_empty(&chan->tx_q))
1781 chan->tx_send_head = chan->tx_q.next;
1782
1783 chan->next_tx_seq = chan->expected_ack_seq;
1784 ret = l2cap_ertm_send(chan);
1785 return ret;
1786 }
1787
/* Acknowledge received frames.  If we are locally busy, send an RNR;
 * otherwise try to piggyback the ack on pending I-frames, and only
 * fall back to an explicit RR S-frame when nothing was transmitted.
 */
static void __l2cap_send_ack(struct l2cap_chan *chan)
{
	u32 control = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	/* I-frames carry reqseq, so sending any of them acks implicitly. */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
}
1807
/* Send an ack now and cancel the pending delayed-ack timer so we do
 * not ack the same frames twice.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1813
1814 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1815 {
1816 struct srej_list *tail;
1817 u32 control;
1818
1819 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1820 control |= __set_ctrl_final(chan);
1821
1822 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1823 control |= __set_reqseq(chan, tail->tx_seq);
1824
1825 l2cap_send_sframe(chan, control);
1826 }
1827
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into continuation fragments chained on
 * skb's frag_list (each at most conn->mtu bytes, no L2CAP header).
 *
 * Returns the number of bytes consumed, or a negative errno.  On
 * failure the partially built frag chain is already attached to @skb,
 * so the caller's kfree_skb() releases everything.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the head skb's accounting in sync with the chain. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1872
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload from @msg.  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	/* First skb holds at most one HCI MTU; the rest goes to frags. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1906
/* Build a basic-mode B-frame PDU: plain L2CAP header plus the user
 * payload from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	/* First skb holds at most one HCI MTU; the rest goes to frags. */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1939
/* Build an ERTM/streaming I-frame PDU.  The header is the L2CAP header
 * plus an (enhanced or extended) control field, an optional SDU length
 * field (only on the first segment of a segmented SDU, @sdulen != 0)
 * and room for the FCS at the tail when CRC16 is in use.  The control
 * field is zeroed here and filled in at transmit time.  Returns the skb
 * or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size depends on the extended-control flag. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Placeholder control field; stamped at send time. */
	__put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve the FCS slot; the value is computed at send time. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));

	bt_cb(skb)->control.retries = 0;
	return skb;
}
1994
/* Segment an outgoing SDU of @len bytes into I-frame PDUs and queue
 * them on @seg_queue.  The PDU size is bounded by the HCI MTU, the
 * remote's MPS and the BR/EDR maximum, with worst-case L2CAP overhead
 * subtracted.  SAR bits are recorded in each skb's control block:
 * UNSEGMENTED for a single PDU, else START / CONTINUE / END, with the
 * total SDU length carried only in the START PDU.
 *
 * Returns 0 on success or a negative errno; on failure @seg_queue has
 * been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* START PDU additionally carries the SDU length field. */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later PDUs regain that space for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2060
/* Send @len bytes of user data on @chan according to the channel type
 * and mode: a single connectionless PDU, a single basic-mode PDU, or a
 * segmented SDU for ERTM/streaming.  Returns the number of bytes sent
 * or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
			chan->tx_send_head = seg_queue.next;
		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_ertm_send(chan);
		else
			l2cap_streaming_send(chan);

		if (err >= 0)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2145
/* Copy frame to all raw sockets on that connection.
 * Each raw channel gets its own clone of @skb; the original skb is not
 * consumed.  The channel that originated the frame is skipped.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() takes ownership on success; free on refusal. */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2174
2175 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header + command header
 * + @dlen bytes of @data, split into continuation fragments when it
 * exceeds the HCI MTU.  Picks the LE or BR/EDR signalling CID from the
 * link type.  Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the first skb with whatever payload fits. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the frag chain built so far as well. */
	kfree_skb(skb);
	return NULL;
}
2238
/* Parse one configuration option at *@ptr, advancing *@ptr past it.
 * Returns the total option length consumed and fills in @type, @olen
 * and @val (1/2/4-byte values are decoded little-endian; any other
 * length yields a pointer to the raw value bytes cast into @val).
 *
 * NOTE(review): this trusts opt->len from the remote; the caller is
 * responsible for bounds-checking against the end of the received
 * packet - confirm all call sites do so.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2271
/* Append one configuration option (type/len/value) at *@ptr and advance
 * *@ptr past it.  1/2/4-byte values are encoded little-endian; any
 * other length treats @val as a pointer to @len raw bytes.
 *
 * NOTE(review): no output-buffer bound is passed in; the caller must
 * guarantee L2CAP_CONF_OPT_SIZE + len bytes of space remain.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2301
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters.  Only ERTM and streaming modes carry an EFS;
 * other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming uses fixed best-effort parameters. */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
2332
/* Delayed-ack timer: received frames have gone unacked long enough,
 * send an explicit ack now.  Drops the reference the timer held on the
 * channel.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	__l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	l2cap_chan_put(chan);
}
2348
/* Reset per-channel sequence/reassembly state and, for ERTM proper,
 * initialise the state machines, timers and SREJ bookkeeping.
 * Returns 0 or a negative errno from the sequence-list allocations.
 *
 * NOTE(review): if the second l2cap_seq_list_init() fails, srej_list
 * remains allocated; presumably it is released on channel teardown -
 * confirm against l2cap_chan free path.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs only the common state above. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
2386
2387 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2388 {
2389 switch (mode) {
2390 case L2CAP_MODE_STREAMING:
2391 case L2CAP_MODE_ERTM:
2392 if (l2cap_mode_supported(mode, remote_feat_mask))
2393 return mode;
2394 /* fall through */
2395 default:
2396 return L2CAP_MODE_BASIC;
2397 }
2398 }
2399
2400 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2401 {
2402 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2403 }
2404
2405 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2406 {
2407 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2408 }
2409
2410 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2411 {
2412 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2413 __l2cap_ews_supported(chan)) {
2414 /* use extended control field */
2415 set_bit(FLAG_EXT_CTRL, &chan->flags);
2416 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2417 } else {
2418 chan->tx_win = min_t(u16, chan->tx_win,
2419 L2CAP_DEFAULT_TX_WINDOW);
2420 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2421 }
2422 }
2423
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the very first exchange (no conf req/rsp seen yet) the channel
 * mode may still be downgraded according to the remote feature mask;
 * after that the already-chosen mode is kept.  Options are appended
 * one by one through l2cap_add_conf_opt(), which advances @ptr.
 *
 * Returns the number of bytes written, used as the command length.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection is only done once, before any exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A CONF_STATE2_DEVICE channel insists on its mode and
		 * never falls back to basic.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only mention the MTU when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC option when the
		 * remote understands RFC at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU must fit in the ACL MTU with worst-case framing */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Extended window sizes go in a separate EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2544
/* Parse the Configure Request accumulated in chan->conf_req and build
 * the Configure Response into @data.
 *
 * Every option length returned by l2cap_get_conf_opt() is validated
 * before its value is used.  Previously a malformed (e.g. undersized)
 * EFS option still set remote_efs, so the uninitialized stack copy of
 * `efs` was read and even echoed back to the peer; truncated options
 * are now simply ignored.
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the request is unacceptable and the connection must be refused.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only mark EFS as received for a well-formed
			 * option, otherwise `efs` below would be used
			 * without ever being initialized.
			 */
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hint options may be silently skipped; real
			 * unknown options must be reported back.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 devices refuse anything but their own mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed negotiation round */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Cap the remote PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2764
2765 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2766 {
2767 struct l2cap_conf_req *req = data;
2768 void *ptr = req->data;
2769 int type, olen;
2770 unsigned long val;
2771 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2772 struct l2cap_conf_efs efs;
2773
2774 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2775
2776 while (len >= L2CAP_CONF_OPT_SIZE) {
2777 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2778
2779 switch (type) {
2780 case L2CAP_CONF_MTU:
2781 if (val < L2CAP_DEFAULT_MIN_MTU) {
2782 *result = L2CAP_CONF_UNACCEPT;
2783 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2784 } else
2785 chan->imtu = val;
2786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2787 break;
2788
2789 case L2CAP_CONF_FLUSH_TO:
2790 chan->flush_to = val;
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2792 2, chan->flush_to);
2793 break;
2794
2795 case L2CAP_CONF_RFC:
2796 if (olen == sizeof(rfc))
2797 memcpy(&rfc, (void *)val, olen);
2798
2799 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2800 rfc.mode != chan->mode)
2801 return -ECONNREFUSED;
2802
2803 chan->fcs = 0;
2804
2805 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2806 sizeof(rfc), (unsigned long) &rfc);
2807 break;
2808
2809 case L2CAP_CONF_EWS:
2810 chan->tx_win = min_t(u16, val,
2811 L2CAP_DEFAULT_EXT_WINDOW);
2812 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2813 chan->tx_win);
2814 break;
2815
2816 case L2CAP_CONF_EFS:
2817 if (olen == sizeof(efs))
2818 memcpy(&efs, (void *)val, olen);
2819
2820 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2821 efs.stype != L2CAP_SERV_NOTRAFIC &&
2822 efs.stype != chan->local_stype)
2823 return -ECONNREFUSED;
2824
2825 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2826 sizeof(efs), (unsigned long) &efs);
2827 break;
2828 }
2829 }
2830
2831 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2832 return -ECONNREFUSED;
2833
2834 chan->mode = rfc.mode;
2835
2836 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2837 switch (rfc.mode) {
2838 case L2CAP_MODE_ERTM:
2839 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2840 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2841 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2842
2843 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2844 chan->local_msdu = le16_to_cpu(efs.msdu);
2845 chan->local_sdu_itime =
2846 le32_to_cpu(efs.sdu_itime);
2847 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2848 chan->local_flush_to =
2849 le32_to_cpu(efs.flush_to);
2850 }
2851 break;
2852
2853 case L2CAP_MODE_STREAMING:
2854 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2855 }
2856 }
2857
2858 req->dcid = cpu_to_le16(chan->dcid);
2859 req->flags = cpu_to_le16(0x0000);
2860
2861 return ptr - data;
2862 }
2863
2864 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2865 {
2866 struct l2cap_conf_rsp *rsp = data;
2867 void *ptr = rsp->data;
2868
2869 BT_DBG("chan %p", chan);
2870
2871 rsp->scid = cpu_to_le16(chan->dcid);
2872 rsp->result = cpu_to_le16(result);
2873 rsp->flags = cpu_to_le16(flags);
2874
2875 return ptr - data;
2876 }
2877
2878 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2879 {
2880 struct l2cap_conn_rsp rsp;
2881 struct l2cap_conn *conn = chan->conn;
2882 u8 buf[128];
2883
2884 rsp.scid = cpu_to_le16(chan->dcid);
2885 rsp.dcid = cpu_to_le16(chan->scid);
2886 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2887 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2888 l2cap_send_cmd(conn, chan->ident,
2889 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2890
2891 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2892 return;
2893
2894 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2895 l2cap_build_conf_req(chan, buf), buf);
2896 chan->num_conf_req++;
2897 }
2898
2899 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2900 {
2901 int type, olen;
2902 unsigned long val;
2903 struct l2cap_conf_rfc rfc;
2904
2905 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2906
2907 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2908 return;
2909
2910 while (len >= L2CAP_CONF_OPT_SIZE) {
2911 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2912
2913 switch (type) {
2914 case L2CAP_CONF_RFC:
2915 if (olen == sizeof(rfc))
2916 memcpy(&rfc, (void *)val, olen);
2917 goto done;
2918 }
2919 }
2920
2921 /* Use sane default values in case a misbehaving remote device
2922 * did not send an RFC option.
2923 */
2924 rfc.mode = chan->mode;
2925 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2926 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2927 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2928
2929 BT_ERR("Expected RFC option was not found, using defaults");
2930
2931 done:
2932 switch (rfc.mode) {
2933 case L2CAP_MODE_ERTM:
2934 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2935 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2936 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2937 break;
2938 case L2CAP_MODE_STREAMING:
2939 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2940 }
2941 }
2942
2943 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2944 {
2945 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2946
2947 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2948 return 0;
2949
2950 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2951 cmd->ident == conn->info_ident) {
2952 cancel_delayed_work(&conn->info_timer);
2953
2954 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2955 conn->info_ident = 0;
2956
2957 l2cap_conn_start(conn);
2958 }
2959
2960 return 0;
2961 }
2962
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, run security and backlog checks, create the child channel and
 * answer with a Connect Response (success, pending, or an error).
 *
 * Locking: conn->chan_lock and the parent socket lock are held across
 * the channel creation; the response itself is sent after both are
 * released (sendresp label).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		/* Duplicate source CID from the peer: tear the new
		 * child channel straight back down.
		 */
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Userspace wants to authorize first:
				 * answer "pending" and wake the parent.
				 */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Pending only because features are unknown: start the
		 * feature mask exchange now.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3092
/* Handle a Connect Response from the remote.
 *
 * On success the channel moves to BT_CONFIG and the first Configure
 * Request is sent (once); a pending result just records the state;
 * any other result deletes the channel with ECONNREFUSED.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A response with scid 0 (e.g. still pending) is matched by
	 * the command ident instead of the source CID.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result means the peer refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3160
3161 static inline void set_default_fcs(struct l2cap_chan *chan)
3162 {
3163 /* FCS is enabled only in ERTM or streaming mode, if one or both
3164 * sides request it.
3165 */
3166 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3167 chan->fcs = L2CAP_FCS_NONE;
3168 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3169 chan->fcs = L2CAP_FCS_CRC16;
3170 }
3171
/* Handle an incoming Configure Request.
 *
 * Fragments (continuation flag set) are accumulated in chan->conf_req
 * until the final fragment arrives; the complete request is then
 * parsed, a response is sent, and - once both directions are done -
 * the channel is moved to BT_CONNECTED.
 *
 * Returns 0 or a negative errno (e.g. -ENOENT for an unknown CID).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configuration is not valid in this state: reject
		 * with an invalid-CID command reject.
		 */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unacceptable request: tear the channel down */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
						L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3281
/* Handle an incoming Configure Response.
 *
 * Success stores the remote's RFC parameters; PENDING may trigger an
 * early compatibility exchange; UNACCEPT retries the negotiation with
 * a new request (bounded by L2CAP_CONF_MAX_CONF_RSP, falling through
 * to the failure path once exhausted); anything else disconnects.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
			result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
							L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Retry the negotiation while rounds remain; once the
		 * limit is exceeded this falls through to the failure
		 * handling in default.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More response fragments to come */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3388
/* Handle an incoming Disconnect Request: acknowledge it with a
 * Disconnect Response, shut the socket down and delete the channel.
 *
 * The extra l2cap_chan_hold()/l2cap_chan_put() pair keeps the channel
 * alive across l2cap_chan_del() so that ops->close() can still be
 * called safely after the channel lock is dropped.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3434
/* Handle a Disconnect Response: the peer confirmed our disconnect, so
 * delete the channel (reason 0 = clean close).
 *
 * As in l2cap_disconnect_req(), the hold/put pair keeps the channel
 * valid for ops->close() after l2cap_chan_del() drops it from the
 * connection list.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3468
/* Handle an incoming Information Request and answer it directly.
 *
 * Supported queries are the feature mask (extended with ERTM/streaming
 * /FCS and, with high speed enabled, extended flow/window bits) and
 * the fixed channel map; everything else gets a not-supported reply.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only with HS on */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3518
/* Handle an incoming Information Response during feature discovery.
 *
 * A feature-mask reply may chain into a fixed-channel query when the
 * remote supports fixed channels; once discovery is finished (success
 * or not) pending channels are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Discovery failed: mark it done and carry on without
		 * the extra feature information.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3576
3577 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3578 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3579 void *data)
3580 {
3581 struct l2cap_create_chan_req *req = data;
3582 struct l2cap_create_chan_rsp rsp;
3583 u16 psm, scid;
3584
3585 if (cmd_len != sizeof(*req))
3586 return -EPROTO;
3587
3588 if (!enable_hs)
3589 return -EINVAL;
3590
3591 psm = le16_to_cpu(req->psm);
3592 scid = le16_to_cpu(req->scid);
3593
3594 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3595
3596 /* Placeholder: Always reject */
3597 rsp.dcid = 0;
3598 rsp.scid = cpu_to_le16(scid);
3599 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3600 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3601
3602 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3603 sizeof(rsp), &rsp);
3604
3605 return 0;
3606 }
3607
/* A Create Channel Response carries the same payload layout as a
 * Connect Response, so simply reuse the regular connect handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3615
3616 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3617 u16 icid, u16 result)
3618 {
3619 struct l2cap_move_chan_rsp rsp;
3620
3621 BT_DBG("icid %d, result %d", icid, result);
3622
3623 rsp.icid = cpu_to_le16(icid);
3624 rsp.result = cpu_to_le16(result);
3625
3626 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3627 }
3628
3629 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3630 struct l2cap_chan *chan, u16 icid, u16 result)
3631 {
3632 struct l2cap_move_chan_cfm cfm;
3633 u8 ident;
3634
3635 BT_DBG("icid %d, result %d", icid, result);
3636
3637 ident = l2cap_get_ident(conn);
3638 if (chan)
3639 chan->ident = ident;
3640
3641 cfm.icid = cpu_to_le16(icid);
3642 cfm.result = cpu_to_le16(result);
3643
3644 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3645 }
3646
3647 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3648 u16 icid)
3649 {
3650 struct l2cap_move_chan_cfm_rsp rsp;
3651
3652 BT_DBG("icid %d", icid);
3653
3654 rsp.icid = cpu_to_le16(icid);
3655 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3656 }
3657
3658 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3659 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3660 {
3661 struct l2cap_move_chan_req *req = data;
3662 u16 icid = 0;
3663 u16 result = L2CAP_MR_NOT_ALLOWED;
3664
3665 if (cmd_len != sizeof(*req))
3666 return -EPROTO;
3667
3668 icid = le16_to_cpu(req->icid);
3669
3670 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3671
3672 if (!enable_hs)
3673 return -EINVAL;
3674
3675 /* Placeholder: Always refuse */
3676 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3677
3678 return 0;
3679 }
3680
3681 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3682 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3683 {
3684 struct l2cap_move_chan_rsp *rsp = data;
3685 u16 icid, result;
3686
3687 if (cmd_len != sizeof(*rsp))
3688 return -EPROTO;
3689
3690 icid = le16_to_cpu(rsp->icid);
3691 result = le16_to_cpu(rsp->result);
3692
3693 BT_DBG("icid %d, result %d", icid, result);
3694
3695 /* Placeholder: Always unconfirmed */
3696 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3697
3698 return 0;
3699 }
3700
3701 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3702 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3703 {
3704 struct l2cap_move_chan_cfm *cfm = data;
3705 u16 icid, result;
3706
3707 if (cmd_len != sizeof(*cfm))
3708 return -EPROTO;
3709
3710 icid = le16_to_cpu(cfm->icid);
3711 result = le16_to_cpu(cfm->result);
3712
3713 BT_DBG("icid %d, result %d", icid, result);
3714
3715 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3716
3717 return 0;
3718 }
3719
3720 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3721 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3722 {
3723 struct l2cap_move_chan_cfm_rsp *rsp = data;
3724 u16 icid;
3725
3726 if (cmd_len != sizeof(*rsp))
3727 return -EPROTO;
3728
3729 icid = le16_to_cpu(rsp->icid);
3730
3731 BT_DBG("icid %d", icid);
3732
3733 return 0;
3734 }
3735
/* Validate a set of LE connection parameters.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	/* Connection interval: units of 1.25 ms, range 7.5 ms .. 4 s,
	 * and min must not exceed max.
	 */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout: units of 10 ms, range 100 ms .. 32 s */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* The timeout must be strictly larger than the maximum interval
	 * (8 converts 10 ms units into 1.25 ms units).
	 */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	/* Slave latency must leave room for at least one connection
	 * event inside the supervision timeout, and may never exceed
	 * the spec limit of 499.
	 */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
3756
3757 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3758 struct l2cap_cmd_hdr *cmd, u8 *data)
3759 {
3760 struct hci_conn *hcon = conn->hcon;
3761 struct l2cap_conn_param_update_req *req;
3762 struct l2cap_conn_param_update_rsp rsp;
3763 u16 min, max, latency, to_multiplier, cmd_len;
3764 int err;
3765
3766 if (!(hcon->link_mode & HCI_LM_MASTER))
3767 return -EINVAL;
3768
3769 cmd_len = __le16_to_cpu(cmd->len);
3770 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3771 return -EPROTO;
3772
3773 req = (struct l2cap_conn_param_update_req *) data;
3774 min = __le16_to_cpu(req->min);
3775 max = __le16_to_cpu(req->max);
3776 latency = __le16_to_cpu(req->latency);
3777 to_multiplier = __le16_to_cpu(req->to_multiplier);
3778
3779 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3780 min, max, latency, to_multiplier);
3781
3782 memset(&rsp, 0, sizeof(rsp));
3783
3784 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3785 if (err)
3786 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3787 else
3788 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3789
3790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3791 sizeof(rsp), &rsp);
3792
3793 if (!err)
3794 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3795
3796 return 0;
3797 }
3798
3799 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3800 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3801 {
3802 int err = 0;
3803
3804 switch (cmd->code) {
3805 case L2CAP_COMMAND_REJ:
3806 l2cap_command_rej(conn, cmd, data);
3807 break;
3808
3809 case L2CAP_CONN_REQ:
3810 err = l2cap_connect_req(conn, cmd, data);
3811 break;
3812
3813 case L2CAP_CONN_RSP:
3814 err = l2cap_connect_rsp(conn, cmd, data);
3815 break;
3816
3817 case L2CAP_CONF_REQ:
3818 err = l2cap_config_req(conn, cmd, cmd_len, data);
3819 break;
3820
3821 case L2CAP_CONF_RSP:
3822 err = l2cap_config_rsp(conn, cmd, data);
3823 break;
3824
3825 case L2CAP_DISCONN_REQ:
3826 err = l2cap_disconnect_req(conn, cmd, data);
3827 break;
3828
3829 case L2CAP_DISCONN_RSP:
3830 err = l2cap_disconnect_rsp(conn, cmd, data);
3831 break;
3832
3833 case L2CAP_ECHO_REQ:
3834 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3835 break;
3836
3837 case L2CAP_ECHO_RSP:
3838 break;
3839
3840 case L2CAP_INFO_REQ:
3841 err = l2cap_information_req(conn, cmd, data);
3842 break;
3843
3844 case L2CAP_INFO_RSP:
3845 err = l2cap_information_rsp(conn, cmd, data);
3846 break;
3847
3848 case L2CAP_CREATE_CHAN_REQ:
3849 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3850 break;
3851
3852 case L2CAP_CREATE_CHAN_RSP:
3853 err = l2cap_create_channel_rsp(conn, cmd, data);
3854 break;
3855
3856 case L2CAP_MOVE_CHAN_REQ:
3857 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3858 break;
3859
3860 case L2CAP_MOVE_CHAN_RSP:
3861 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3862 break;
3863
3864 case L2CAP_MOVE_CHAN_CFM:
3865 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3866 break;
3867
3868 case L2CAP_MOVE_CHAN_CFM_RSP:
3869 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3870 break;
3871
3872 default:
3873 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3874 err = -EINVAL;
3875 break;
3876 }
3877
3878 return err;
3879 }
3880
3881 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3882 struct l2cap_cmd_hdr *cmd, u8 *data)
3883 {
3884 switch (cmd->code) {
3885 case L2CAP_COMMAND_REJ:
3886 return 0;
3887
3888 case L2CAP_CONN_PARAM_UPDATE_REQ:
3889 return l2cap_conn_param_update_req(conn, cmd, data);
3890
3891 case L2CAP_CONN_PARAM_UPDATE_RSP:
3892 return 0;
3893
3894 default:
3895 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3896 return -EINVAL;
3897 }
3898 }
3899
/* Process a signaling-channel packet. One skb may carry several
 * concatenated commands, each preceded by an L2CAP_CMD_HDR_SIZE
 * header. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Let raw sockets see the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command claiming more payload than remains in the
		 * packet, or using the reserved ident 0, is malformed;
		 * abandon the rest of the packet.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Step over this command's payload to the next one */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3946
/* Verify and strip the CRC16 FCS trailer of an ERTM/streaming frame.
 * Returns 0 when acceptable (or FCS disabled), -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The FCS also covers the control header, whose size depends on
	 * whether extended control fields are in use.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the trailer first; the bytes remain in the buffer
		 * right past the new skb->len, so they can still be read
		 * from skb->data + skb->len after the trim.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* Recompute over header + payload; the header sits just
		 * before skb->data (it was pulled by the caller).
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3967
/* Send pending I-frames if possible, otherwise make sure the ack
 * (and our busy state) still reaches the peer via an RR or RNR
 * S-frame.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u32 control = 0;

	chan->frames_sent = 0;

	control |= __set_reqseq(chan, chan->buffer_seq);

	/* Locally busy: tell the peer to stop sending with an RNR */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* If no I-frame went out and we are not busy, the ack still has
	 * to be delivered: fall back to an RR frame.
	 */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, control);
	}
}
3993
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by each frame's offset from buffer_seq.
 * Returns -EINVAL for a duplicate tx_seq, 0 on insertion.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->control.txseq = tx_seq;
	bt_cb(skb)->control.sar = sar;

	next_skb = skb_peek(&chan->srej_q);

	/* Compare offsets from buffer_seq rather than raw sequence
	 * numbers so that modulo wrap-around sorts correctly.
	 */
	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	while (next_skb) {
		if (bt_cb(next_skb)->control.txseq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = __seq_offset(chan,
				bt_cb(next_skb)->control.txseq, chan->buffer_seq);

		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			next_skb = NULL;
		else
			next_skb = skb_queue_next(&chan->srej_q, next_skb);
	}

	/* Larger than everything queued (or queue empty): append */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
4028
/* Chain new_frag onto skb's fragment list and update the aggregate
 * length accounting. *last_frag tracks the current tail so appends
 * stay O(1).
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4047
/* Reassemble a (possibly segmented) SDU and hand complete SDUs to the
 * channel's recv callback. Ownership of skb passes in: it is either
 * consumed by recv, parked in chan->sdu, or freed on error.
 * Returns 0 on success or partial progress, negative errno otherwise.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
{
	int err = -EINVAL;

	switch (__get_ctrl_sar(chan, control)) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembly is in progress
		 * is a protocol error (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment may not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match what the start segment announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending frame (if still ours) and any
		 * partially reassembled SDU; kfree_skb(NULL) is a no-op.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4128
/* Enter the ERTM local-busy state: mark the channel busy, abandon any
 * pending SREJ recovery state and arm the ack timer.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	l2cap_seq_list_clear(&chan->srej_list);

	__set_ack_timer(chan);
}
4138
/* Leave the ERTM local-busy state. If an RNR was sent earlier, poll
 * the peer with an RR (P=1) and wait for the F-bit response before
 * resuming normal operation.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u32 control;

	/* If no RNR ever went out, just clear the state bits */
	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = __set_reqseq(chan, chan->buffer_seq);
	control |= __set_ctrl_poll(chan);
	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	/* Switch from the retransmission timer to the monitor timer
	 * while waiting for the response to our poll.
	 */
	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
4163
4164 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4165 {
4166 if (chan->mode == L2CAP_MODE_ERTM) {
4167 if (busy)
4168 l2cap_ertm_enter_local_busy(chan);
4169 else
4170 l2cap_ertm_exit_local_busy(chan);
4171 }
4172 }
4173
/* After a missing frame arrives, drain the SREJ queue of frames that
 * are now in sequence, reassembling as we go. Stops at the first
 * remaining gap or when the channel goes locally busy.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb;
	u32 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* The queue is ordered, so a mismatch means the next
		 * expected frame is still missing.
		 */
		if (bt_cb(skb)->control.txseq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
		tx_seq = __next_seq(chan, tx_seq);
	}
}
4199
/* Resend SREJ frames for every still-missing sequence number ahead of
 * tx_seq in the pending list; tx_seq itself just arrived, so its
 * entry is removed and iteration stops there.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *l, *tmp;
	u32 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, l->tx_seq);
		l2cap_send_sframe(chan, control);
		/* Rotate the entry to the tail, preserving request order */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
4218
/* Send an SREJ for every sequence number between expected_tx_seq and
 * the (out-of-order) tx_seq just received, recording each request in
 * chan->srej_l and chan->srej_list. Returns 0 or -ENOMEM.
 */
static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *new;
	u32 control;

	while (tx_seq != chan->expected_tx_seq) {
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, chan->expected_tx_seq);
		l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
		l2cap_send_sframe(chan, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;

		new->tx_seq = chan->expected_tx_seq;

		chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

		list_add_tail(&new->list, &chan->srej_l);
	}

	/* Step past tx_seq itself: it was received (out of order) and
	 * does not need to be requested.
	 */
	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	return 0;
}
4245
/* Process a received ERTM I-frame: apply its piggybacked ack, detect
 * gaps and duplicates, drive SREJ-based recovery, and pass
 * in-sequence frames on to SDU reassembly. Takes ownership of skb.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	u16 tx_seq = __get_txseq(chan, rx_control);
	u16 req_seq = __get_reqseq(chan, rx_control);
	u8 sar = __get_ctrl_sar(chan, rx_control);
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
							tx_seq, rx_control);

	/* An F-bit answering our poll ends the WAIT_F state */
	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	/* The piggybacked ReqSeq acknowledges our transmitted frames */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* While locally busy, drop the payload but keep acking so the
	 * peer's window does not stall completely.
	 */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		goto drop;
	}

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: queue it and
			 * drain whatever is now in sequence.
			 */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			/* All gaps filled: leave SREJ recovery */
			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			/* Already requested via SREJ? Re-request the rest */
			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}

			/* A brand-new gap beyond the known ones */
			err = l2cap_send_srejframe(chan, tx_seq);
			if (err < 0) {
				l2cap_send_disconn_req(chan->conn, chan, -err);
				return err;
			}
		}
	} else {
		expected_tx_seq_offset = __seq_offset(chan,
				chan->expected_tx_seq, chan->buffer_seq);

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: start SREJ recovery */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		/* Set P-bit only if there are some I-frames to ack. */
		if (__clear_ack_timer(chan))
			set_bit(CONN_SEND_PBIT, &chan->conn_state);

		err = l2cap_send_srejframe(chan, tx_seq);
		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, -err);
			return err;
		}
	}
	return 0;

expected:
	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	/* During SREJ recovery even in-sequence frames are buffered so
	 * the SDU stream stays ordered once the gaps close.
	 */
	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		bt_cb(skb)->control.txseq = tx_seq;
		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}


	/* Ack after roughly a sixth of the window; otherwise defer the
	 * ack via the timer.
	 */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);
	else
		__set_ack_timer(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4390
/* Handle a Receiver Ready S-frame: it acknowledges frames up to
 * ReqSeq and may carry a poll (P=1) or final (F=1) bit.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
				__get_reqseq(chan, rx_control), rx_control);

	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control)) {
		/* The answer to a poll must carry the F-bit */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (__is_ctrl_final(chan, rx_control)) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		/* Retransmit unless a REJ already triggered it */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		/* Plain RR: resume sending, restart the retransmission
		 * timer if anything is still unacknowledged.
		 */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
4430
/* Handle a Reject S-frame: the peer requests retransmission of all
 * frames starting at ReqSeq (go-back-N).
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	/* Everything before ReqSeq is implicitly acknowledged */
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_final(chan, rx_control)) {
		/* Only retransmit if the poll exchange did not already */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* Remember we acted on this REJ so a later F-bit does
		 * not trigger a second retransmission.
		 */
		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Handle a Selective Reject S-frame: retransmit exactly the frame
 * the peer asks for, with P/F-bit bookkeeping.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (__is_ctrl_poll(chan, rx_control)) {
		/* SREJ with P=1 also acknowledges frames before ReqSeq */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (__is_ctrl_final(chan, rx_control)) {
		/* Skip the retransmit when this F-bit answers an SREJ we
		 * already served for the same sequence number.
		 */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
4487
/* Handle a Receiver Not Ready S-frame: the peer is busy; record its
 * ack, stop the retransmission timer and answer polls as required.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control))
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		__clear_retrans_timer(chan);
		/* A poll still requires an immediate F-bit response */
		if (__is_ctrl_poll(chan, rx_control))
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery is in progress */
	if (__is_ctrl_poll(chan, rx_control)) {
		l2cap_send_srejtail(chan);
	} else {
		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, rx_control);
	}
}
4515
/* Dispatch a received S-frame by its supervisory function (RR, REJ,
 * SREJ, RNR). The skb is consumed here; S-frames carry no payload.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);

	/* An F-bit answering our poll ends the WAIT_F state */
	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (__get_ctrl_super(chan, rx_control)) {
	case L2CAP_SUPER_RR:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJ:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SREJ:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RNR:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
4549
/* Validate one received ERTM PDU (FCS, payload length, ReqSeq range)
 * and dispatch it to the I-frame or S-frame handler. Takes ownership
 * of skb. Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u32 control;
	u16 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	__unpack_control(chan, skb);

	control = __get_control(chan, skb->data);
	skb_pull(skb, __ctrl_size(chan));
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Deduct the SDU-length prefix of start segments and the FCS
	 * trailer to get the effective payload length.
	 */
	if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(chan, control);

	req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);

	next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
						chan->expected_ack_seq);

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!__is_sframe(chan, control)) {
		/* I-frame: payload length must not have gone negative */
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frame: must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4617
/* Deliver an incoming data PDU to the channel bound to the given CID,
 * according to the channel's mode. All paths after a successful
 * lookup exit through 'done' to release the channel lock.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	u32 control;
	u16 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning zero means it consumed the skb */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* The ERTM handler takes ownership of the skb */
		l2cap_ertm_data_rcv(chan, skb);

		goto done;

	case L2CAP_MODE_STREAMING:
		control = __get_control(chan, skb->data);
		skb_pull(skb, __ctrl_size(chan));
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(chan, control))
			len -= L2CAP_SDULEN_SIZE;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= L2CAP_FCS_SIZE;

		/* Streaming mode never carries S-frames */
		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
			goto drop;

		tx_seq = __get_txseq(chan, control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = __next_seq(chan, tx_seq);

		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
4706
4707 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4708 {
4709 struct l2cap_chan *chan;
4710
4711 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4712 if (!chan)
4713 goto drop;
4714
4715 BT_DBG("chan %p, len %d", chan, skb->len);
4716
4717 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4718 goto drop;
4719
4720 if (chan->imtu < skb->len)
4721 goto drop;
4722
4723 if (!chan->ops->recv(chan->data, skb))
4724 return 0;
4725
4726 drop:
4727 kfree_skb(skb);
4728
4729 return 0;
4730 }
4731
4732 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4733 struct sk_buff *skb)
4734 {
4735 struct l2cap_chan *chan;
4736
4737 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4738 if (!chan)
4739 goto drop;
4740
4741 BT_DBG("chan %p, len %d", chan, skb->len);
4742
4743 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4744 goto drop;
4745
4746 if (chan->imtu < skb->len)
4747 goto drop;
4748
4749 if (!chan->ops->recv(chan->data, skb))
4750 return 0;
4751
4752 drop:
4753 kfree_skb(skb);
4754
4755 return 0;
4756 }
4757
/* Top-level demultiplexer for a complete L2CAP PDU: route by CID to
 * the signaling, connectionless, ATT, SMP or data channel handlers.
 * The skb is consumed on every path (handlers take ownership).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh was captured before the pull, so the header fields remain
	 * readable after skb->data has advanced past them.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header's length field must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payloads start with the target PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
4801
4802 /* ---- L2CAP interface with lower layer (HCI) ---- */
4803
/* HCI callback for an incoming ACL connection: scan listening
 * channels on the local adapter and combine their policy bits.
 * An exact local-address match takes precedence over wildcard
 * (BDADDR_ANY) listeners. Returns the HCI link-mode bits to apply.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Listener bound to this adapter's address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener, used only if no exact match */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
4834
4835 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4836 {
4837 struct l2cap_conn *conn;
4838
4839 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4840
4841 if (!status) {
4842 conn = l2cap_conn_add(hcon, status);
4843 if (conn)
4844 l2cap_conn_ready(conn);
4845 } else
4846 l2cap_conn_del(hcon, bt_to_errno(status));
4847
4848 return 0;
4849 }
4850
4851 int l2cap_disconn_ind(struct hci_conn *hcon)
4852 {
4853 struct l2cap_conn *conn = hcon->l2cap_data;
4854
4855 BT_DBG("hcon %p", hcon);
4856
4857 if (!conn)
4858 return HCI_ERROR_REMOTE_USER_TERM;
4859 return conn->disc_reason;
4860 }
4861
/* Disconnection-complete callback: tear down all L2CAP state for the
 * link, translating the HCI reason code into an errno.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
	return 0;
}
4869
4870 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4871 {
4872 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4873 return;
4874
4875 if (encrypt == 0x00) {
4876 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4877 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4878 } else if (chan->sec_level == BT_SECURITY_HIGH)
4879 l2cap_chan_close(chan, ECONNREFUSED);
4880 } else {
4881 if (chan->sec_level == BT_SECURITY_MEDIUM)
4882 __clear_chan_timer(chan);
4883 }
4884 }
4885
/* Security-change callback from the HCI core: authentication and/or
 * encryption state of the underlying link changed.  Walk every channel
 * on the connection and advance or abort its state machine accordingly.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* On LE, start SMP key distribution once the link is
		 * encrypted, and stop the pending security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE ATT fixed channel becomes ready as soon as
			 * the link is successfully encrypted.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A connect is already pending for this channel; leave it
		 * to complete on its own.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			/* Security upgrade succeeded on an established
			 * channel: resume the suspended socket and
			 * re-evaluate the encryption requirements.
			 */
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security: send
			 * the request now, or arm the disconnect timer on
			 * failure.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was deferred pending security:
			 * answer the remote's Connect Request now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
							&bt_sk(sk)->flags)) {
					/* Userspace must still accept:
					 * report authorization pending and
					 * wake the listening parent.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject the connection
				 * and schedule a disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4986
/* Entry point from the HCI core for incoming ACL data.  Reassembles
 * fragmented L2CAP frames: a start fragment (ACL_CONT not set) carries
 * the Basic L2CAP header announcing the total length; continuation
 * fragments are appended until the frame is complete, then dispatched
 * via l2cap_recv_frame().  Always consumes @skb.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on this link: create the L2CAP connection lazily */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress means
		 * fragments were lost; discard the partial frame.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		/* More data than the header announced: corrupt stream */
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		/* Bytes still outstanding for this frame */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the announced total: drop the whole
		 * partially-assembled frame.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* The fragment skb is always freed here; its payload (if kept)
	 * was copied into conn->rx_skb above.
	 */
	kfree_skb(skb);
	return 0;
}
5078
5079 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5080 {
5081 struct l2cap_chan *c;
5082
5083 read_lock(&chan_list_lock);
5084
5085 list_for_each_entry(c, &chan_list, global_l) {
5086 struct sock *sk = c->sk;
5087
5088 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5089 batostr(&bt_sk(sk)->src),
5090 batostr(&bt_sk(sk)->dst),
5091 c->state, __le16_to_cpu(c->psm),
5092 c->scid, c->dcid, c->imtu, c->omtu,
5093 c->sec_level, c->mode);
5094 }
5095
5096 read_unlock(&chan_list_lock);
5097
5098 return 0;
5099 }
5100
/* Open callback for the debugfs file: bind the show routine via the
 * single_open() seq_file helper.
 */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5105
/* File operations for the read-only L2CAP debugfs file */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5114
5115 int __init l2cap_init(void)
5116 {
5117 int err;
5118
5119 err = l2cap_init_sockets();
5120 if (err < 0)
5121 return err;
5122
5123 if (bt_debugfs) {
5124 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5125 bt_debugfs, NULL, &l2cap_debugfs_fops);
5126 if (!l2cap_debugfs)
5127 BT_ERR("Failed to create L2CAP debug file");
5128 }
5129
5130 return 0;
5131 }
5132
/* Module teardown: remove the debugfs file (a NULL dentry is a no-op)
 * and unregister the L2CAP socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5138
/* Allow ERTM to be disabled at module load time or via sysfs (0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");