]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/bluetooth/l2cap_core.c
Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel...
[mirror_ubuntu-zesty-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
27 */
28
29 /* Bluetooth L2CAP core. */
30
31 #include <linux/module.h>
32
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
51 #include <net/sock.h>
52
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 /* ---- L2CAP channels ---- */
77
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 if (c->dcid == cid)
84 return c;
85 }
86 return NULL;
87 }
88
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
90 {
91 struct l2cap_chan *c;
92
93 list_for_each_entry(c, &conn->chan_l, list) {
94 if (c->scid == cid)
95 return c;
96 }
97 return NULL;
98 }
99
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 struct l2cap_chan *c;
105
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 if (c)
109 l2cap_chan_lock(c);
110 mutex_unlock(&conn->chan_lock);
111
112 return c;
113 }
114
115 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
116 {
117 struct l2cap_chan *c;
118
119 list_for_each_entry(c, &conn->chan_l, list) {
120 if (c->ident == ident)
121 return c;
122 }
123 return NULL;
124 }
125
126 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
127 {
128 struct l2cap_chan *c;
129
130 list_for_each_entry(c, &chan_list, global_l) {
131 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
132 return c;
133 }
134 return NULL;
135 }
136
137 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
138 {
139 int err;
140
141 write_lock(&chan_list_lock);
142
143 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
144 err = -EADDRINUSE;
145 goto done;
146 }
147
148 if (psm) {
149 chan->psm = psm;
150 chan->sport = psm;
151 err = 0;
152 } else {
153 u16 p;
154
155 err = -EINVAL;
156 for (p = 0x1001; p < 0x1100; p += 2)
157 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
158 chan->psm = cpu_to_le16(p);
159 chan->sport = cpu_to_le16(p);
160 err = 0;
161 break;
162 }
163 }
164
165 done:
166 write_unlock(&chan_list_lock);
167 return err;
168 }
169
/* Record a fixed source CID on the channel.  Takes chan_list_lock
 * like the other global-list updaters; always succeeds.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
180
181 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
182 {
183 u16 cid = L2CAP_CID_DYN_START;
184
185 for (; cid < L2CAP_CID_DYN_END; cid++) {
186 if (!__l2cap_get_chan_by_scid(conn, cid))
187 return cid;
188 }
189
190 return 0;
191 }
192
/* Move the channel to a new state and notify the owner through the
 * state_change callback.  Callers hold the socket lock (see
 * l2cap_state_change() for the locking variant).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
201
/* Locking wrapper around __l2cap_state_change(): acquires the socket
 * lock for callers that do not already hold it.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
210
/* Set the pending error code on the channel's socket.  Callers hold
 * the socket lock (see l2cap_chan_set_err() for the locking variant).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
217
/* Locking wrapper around __l2cap_chan_set_err(): acquires the socket
 * lock for callers that do not already hold it.
 */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
226
227 /* ---- L2CAP sequence number lists ---- */
228
229 /* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
235 * allocs or frees.
236 */
237
238 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
239 {
240 size_t alloc_size, i;
241
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) in to a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
245 */
246 alloc_size = roundup_pow_of_two(size);
247
248 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
249 if (!seq_list->list)
250 return -ENOMEM;
251
252 seq_list->mask = alloc_size - 1;
253 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
254 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
255 for (i = 0; i < alloc_size; i++)
256 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
257
258 return 0;
259 }
260
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
265
/* A slot is on the list iff its array entry holds a link value (or
 * the TAIL marker) rather than CLEAR.  NOTE(review): distinct seqs
 * that alias the same (seq & mask) slot are indistinguishable here —
 * presumably callers only track seqs within one window; confirm.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
								u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
272
/* Unlink a sequence number from the list.  Removing the head is
 * constant time; removing an interior element walks the chain.
 * Returns the removed number, or L2CAP_SEQ_LIST_CLEAR if seq was not
 * on the list.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* seq was the only element: list is now empty */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
306
/* Remove and return the list head; L2CAP_SEQ_LIST_CLEAR if empty. */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
312
/* Drop every entry and mark the list empty.  Returns immediately
 * when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
326
/* Append a sequence number at the tail.  A seq whose slot is already
 * in use (duplicate append) is silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
344
/* Delayed-work handler for the channel timer.  Closes the channel
 * with a reason derived from its current state, then drops the
 * reference held while the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* Notify the owner outside the channel lock */
	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was set */
	l2cap_chan_put(chan);
}
374
375 struct l2cap_chan *l2cap_chan_create(void)
376 {
377 struct l2cap_chan *chan;
378
379 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
380 if (!chan)
381 return NULL;
382
383 mutex_init(&chan->lock);
384
385 write_lock(&chan_list_lock);
386 list_add(&chan->global_l, &chan_list);
387 write_unlock(&chan_list_lock);
388
389 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
390
391 chan->state = BT_OPEN;
392
393 atomic_set(&chan->refcnt, 1);
394
395 BT_DBG("chan %p", chan);
396
397 return chan;
398 }
399
/* Remove the channel from the global list and drop the reference
 * taken in l2cap_chan_create().
 */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	l2cap_chan_put(chan);
}
408
/* Install default ERTM transmission parameters and security level on
 * a freshly created channel.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;

	/* Keep the link active while this channel transmits */
	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
419
/* Attach a channel to a connection: assign CIDs and default MTU
 * according to the channel type, install best-effort QoS defaults,
 * take a reference for the connection's list and link the channel in.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			__le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something better is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort quality-of-service defaults */
	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;

	/* The connection's channel list holds its own reference */
	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
468
/* Locking wrapper around __l2cap_chan_add(): takes conn->chan_lock
 * for the duration of the attach.
 */
static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
475
/* Detach a channel from its connection and move it to BT_CLOSED.
 * Wakes the socket (or its listening parent for not-yet-accepted
 * sockets) and, once both configuration directions completed, frees
 * the queued ERTM transmission state.
 */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference held by the connection's list */
		l2cap_chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	lock_sock(sk);

	__l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		__l2cap_chan_set_err(chan, err);

	if (parent) {
		/* Not yet accepted: unlink from the listener's queue */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	release_sock(sk);

	/* Only tear down transmission state once both configuration
	 * directions have completed */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
535
/* Close every child socket still waiting on the listener's accept
 * queue; used when a listening channel goes away.
 */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;

		l2cap_chan_lock(chan);
		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
	}
}
554
/* Close a channel according to its current state: clean up a
 * listener's pending children, send a Disconnect Request for
 * established ACL channels, reject a still-pending incoming connect,
 * or simply tear the channel down.
 * Callers hold the channel lock (see l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
			state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		lock_sock(sk);
		l2cap_chan_cleanup_listen(sk);

		__l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			/* Orderly disconnect: arm the channel timer and
			 * wait for the Disconnect Response */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		lock_sock(sk);
		sock_set_flag(sk, SOCK_ZAPPED);
		release_sock(sk);
		break;
	}
}
618
619 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
620 {
621 if (chan->chan_type == L2CAP_CHAN_RAW) {
622 switch (chan->sec_level) {
623 case BT_SECURITY_HIGH:
624 return HCI_AT_DEDICATED_BONDING_MITM;
625 case BT_SECURITY_MEDIUM:
626 return HCI_AT_DEDICATED_BONDING;
627 default:
628 return HCI_AT_NO_BONDING;
629 }
630 } else if (chan->psm == cpu_to_le16(0x0001)) {
631 if (chan->sec_level == BT_SECURITY_LOW)
632 chan->sec_level = BT_SECURITY_SDP;
633
634 if (chan->sec_level == BT_SECURITY_HIGH)
635 return HCI_AT_NO_BONDING_MITM;
636 else
637 return HCI_AT_NO_BONDING;
638 } else {
639 switch (chan->sec_level) {
640 case BT_SECURITY_HIGH:
641 return HCI_AT_GENERAL_BONDING_MITM;
642 case BT_SECURITY_MEDIUM:
643 return HCI_AT_GENERAL_BONDING;
644 default:
645 return HCI_AT_NO_BONDING;
646 }
647 }
648 }
649
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* Translate the channel's requirements into an HCI auth type,
	 * then let the HCI layer enforce it on the underlying link. */
	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
660
661 static u8 l2cap_get_ident(struct l2cap_conn *conn)
662 {
663 u8 id;
664
665 /* Get next available identificator.
666 * 1 - 128 are used by kernel.
667 * 129 - 199 are reserved.
668 * 200 - 254 are used by utilities like l2ping, etc.
669 */
670
671 spin_lock(&conn->lock);
672
673 if (++conn->tx_ident > 128)
674 conn->tx_ident = 1;
675
676 id = conn->tx_ident;
677
678 spin_unlock(&conn->lock);
679
680 return id;
681 }
682
/* Build and transmit a signalling command over the connection's ACL
 * link.  The command is silently dropped if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use a non-flushable start fragment when the controller
	 * supports it */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling is high priority and keeps the link active */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
703
704 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
705 {
706 struct hci_conn *hcon = chan->conn->hcon;
707 u16 flags;
708
709 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
710 skb->priority);
711
712 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
713 lmp_no_flush_capable(hcon->hdev))
714 flags = ACL_START_NO_FLUSH;
715 else
716 flags = ACL_START;
717
718 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
719 hci_send_acl(chan->conn->hchan, skb, flags);
720 }
721
722 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
723 {
724 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
725 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
726
727 if (enh & L2CAP_CTRL_FRAME_TYPE) {
728 /* S-Frame */
729 control->sframe = 1;
730 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
731 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
732
733 control->sar = 0;
734 control->txseq = 0;
735 } else {
736 /* I-Frame */
737 control->sframe = 0;
738 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
739 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
740
741 control->poll = 0;
742 control->super = 0;
743 }
744 }
745
746 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
747 {
748 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
749 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
750
751 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
752 /* S-Frame */
753 control->sframe = 1;
754 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
755 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
756
757 control->sar = 0;
758 control->txseq = 0;
759 } else {
760 /* I-Frame */
761 control->sframe = 0;
762 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
763 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
764
765 control->poll = 0;
766 control->super = 0;
767 }
768 }
769
/* Decode the control field at the head of skb->data into
 * bt_cb(skb)->control, using the 32-bit extended layout when the
 * channel has FLAG_EXT_CTRL set.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
					struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
						&bt_cb(skb)->control);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
						&bt_cb(skb)->control);
	}
}
781
782 static u32 __pack_extended_control(struct l2cap_ctrl *control)
783 {
784 u32 packed;
785
786 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
787 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
788
789 if (control->sframe) {
790 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
791 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
792 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
793 } else {
794 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
795 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
796 }
797
798 return packed;
799 }
800
801 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
802 {
803 u16 packed;
804
805 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
807
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_CTRL_FRAME_TYPE;
812 } else {
813 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
815 }
816
817 return packed;
818 }
819
/* Write *control into the frame right after the basic L2CAP header,
 * using the 32-bit extended layout when the channel has
 * FLAG_EXT_CTRL set.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
	}
}
832
/* Build and send a supervisory (S) frame with the given control
 * bits.  Pending F-bit/P-bit requests are folded in here, and an FCS
 * trailer is appended when CRC16 checking is in use.  Silently does
 * nothing unless the channel is connected.
 */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen;

	if (chan->state != BT_CONNECTED)
		return;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	BT_DBG("chan %p, control 0x%8.8x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);

	control |= __set_sframe(chan);

	/* Deliver a pending final bit with this frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= __set_ctrl_final(chan);

	/* Deliver a pending poll bit with this frame */
	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= __set_ctrl_poll(chan);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));

	/* FCS is computed over the header and control field */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	l2cap_do_send(chan, skb);
}
881
/* Acknowledge buffer_seq with an RR S-frame, or with RNR while the
 * local side is busy (remembering that an RNR was sent).
 */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
{
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	} else
		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);

	control |= __set_reqseq(chan, chan->buffer_seq);

	l2cap_send_sframe(chan, control);
}
894
/* True when no Connection Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
899
/* Send an L2CAP Connection Request for this channel.  The chosen
 * signalling ident is stored in chan->ident so the response can be
 * matched, and CONF_CONNECT_PEND marks the request as outstanding.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
914
/* Mark the channel connected: reset configuration state, stop the
 * channel timer and wake the socket (and its listening parent, for
 * incoming connections).
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct sock *parent;

	lock_sock(sk);

	parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	/* Configuration is complete; clear all CONF_* bits */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	__l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);

	release_sock(sk);
}
937
/* Begin establishing this channel.  LE channels become ready
 * immediately.  On ACL links the remote feature mask must be known
 * first: if it has not been fetched yet, an Information Request is
 * sent and setup resumes from l2cap_conn_start() when the answer (or
 * the info timeout) arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Request sent but answer still outstanding: wait */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
967
968 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
969 {
970 u32 local_feat_mask = l2cap_feat_mask;
971 if (!disable_ertm)
972 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
973
974 switch (mode) {
975 case L2CAP_MODE_ERTM:
976 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
977 case L2CAP_MODE_STREAMING:
978 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
979 default:
980 return 0x00;
981 }
982 }
983
/* Send a Disconnect Request for the channel, stop any running ERTM
 * timers, and move the channel/socket into BT_DISCONN with the given
 * error set on the socket.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1008
1009 /* ---- L2CAP connections ---- */
/* Walk all channels on the connection once security/feature-mask
 * information is available and push each forward: BT_CONNECT
 * channels get a Connection Request (or are closed when the
 * negotiated mode is unsupported); BT_CONNECT2 channels get their
 * pending Connection Response and, on success, a first Configure
 * Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* CONF_STATE2_DEVICE channels cannot fall back to
			 * basic mode: close instead of connecting with an
			 * unsupported mode */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_send_conn_req(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
						&bt_sk(sk)->flags)) {
					/* Defer to userspace authorization */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Configuration starts only once, and only after a
			 * successful connection response */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1092
/* Find channel with given CID and source/destination bdaddr.
 * Prefers an exact address match; otherwise returns the closest
 * wildcard (BDADDR_ANY) match, or NULL.
 * NOTE(review): despite the original "Returns closest match, locked."
 * wording, the channel is returned without any lock held once
 * chan_list_lock is dropped.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						bdaddr_t *src,
						bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 matches any channel state */
		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
					(src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1135
/* An incoming LE link came up: if a socket is listening on the LE
 * data CID for these addresses, create a child channel, attach it to
 * the connection and signal the listener.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* The new channel pins the HCI connection */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	__l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

clean:
	release_sock(parent);
}
1182
/* Called when the underlying HCI link is fully established.  Performs
 * LE-specific setup (incoming channel creation, security elevation)
 * and then walks every channel on the connection to advance it in its
 * connection state machine.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening LE socket, if any. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: raise security to the pending level via SMP. */
	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (conn->hcon->type == LE_LINK) {
			/* LE channels become ready once security is in
			 * place; there is no configuration exchange.
			 */
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no L2CAP
			 * signalling; mark them connected right away.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			/* Channel was waiting for the link: start the
			 * L2CAP connect procedure now.
			 */
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1221
/* Notify sockets that we cannot guaranty reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that insisted on a reliable link are told;
	 * err is latched on each such channel.
	 */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			__l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1238
1239 static void l2cap_info_timeout(struct work_struct *work)
1240 {
1241 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1242 info_timer.work);
1243
1244 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1245 conn->info_ident = 0;
1246
1247 l2cap_conn_start(conn);
1248 }
1249
/* Tear down the L2CAP state of an HCI connection: kill every channel,
 * cancel per-connection timers and free the connection object.  err is
 * propagated to each channel being closed.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop a partially reassembled inbound frame, if any. */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel survives
		 * l2cap_chan_del() long enough for ops->close() to run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan->data);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Cancel timers synchronously so no work item can still be
	 * running when conn is freed below.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1292
1293 static void security_timeout(struct work_struct *work)
1294 {
1295 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1296 security_timer.work);
1297
1298 BT_DBG("conn %p", conn);
1299
1300 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1301 smp_chan_destroy(conn);
1302 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1303 }
1304 }
1305
/* Allocate and initialise the L2CAP layer state for an HCI connection.
 * Returns the already attached conn if one exists, or NULL on
 * allocation failure or when status indicates a failed HCI setup.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own MTU; fall back to the ACL MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* Only one delayed work is ever used per link type: the SMP
	 * security timer on LE, the info-request timer on BR/EDR.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1354
1355 /* ---- Socket interface ---- */
1356
1357 /* Find socket with psm and source / destination bdaddr.
1358 * Returns closest match.
1359 */
1360 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1361 bdaddr_t *src,
1362 bdaddr_t *dst)
1363 {
1364 struct l2cap_chan *c, *c1 = NULL;
1365
1366 read_lock(&chan_list_lock);
1367
1368 list_for_each_entry(c, &chan_list, global_l) {
1369 struct sock *sk = c->sk;
1370
1371 if (state && c->state != state)
1372 continue;
1373
1374 if (c->psm == psm) {
1375 int src_match, dst_match;
1376 int src_any, dst_any;
1377
1378 /* Exact match. */
1379 src_match = !bacmp(&bt_sk(sk)->src, src);
1380 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1381 if (src_match && dst_match) {
1382 read_unlock(&chan_list_lock);
1383 return c;
1384 }
1385
1386 /* Closest match */
1387 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1388 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1389 if ((src_match && dst_any) || (src_any && dst_match) ||
1390 (src_any && dst_any))
1391 c1 = c;
1392 }
1393 }
1394
1395 read_unlock(&chan_list_lock);
1396
1397 return c1;
1398 }
1399
/* Initiate an outgoing L2CAP connection on the channel: resolve the
 * route to dst, create (or reuse) the ACL/LE link, attach the channel
 * to the connection and kick off the L2CAP connect sequence.
 * Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	lock_sock(sk);

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		release_sock(sk);
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		release_sock(sk);
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		release_sock(sk);
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else uses ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel is allowed per LE link. */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* Channel lock is dropped around l2cap_chan_add(), presumably to
	 * respect conn-level lock ordering -- TODO confirm.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No signalling needed: connected as soon as
			 * security allows.
			 */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1543
/* Sleep until every transmitted I-frame has been acknowledged by the
 * peer, or the channel loses its connection.  Runs with the socket
 * locked; the lock is dropped around each sleep so the receive path
 * can process acknowledgments.  Returns 0 on success or a negative
 * errno (interrupted by a signal, or a socket error).
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval after each timeout. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1575
1576 static void l2cap_monitor_timeout(struct work_struct *work)
1577 {
1578 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1579 monitor_timer.work);
1580
1581 BT_DBG("chan %p", chan);
1582
1583 l2cap_chan_lock(chan);
1584
1585 if (chan->retry_count >= chan->remote_max_tx) {
1586 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1587 l2cap_chan_unlock(chan);
1588 l2cap_chan_put(chan);
1589 return;
1590 }
1591
1592 chan->retry_count++;
1593 __set_monitor_timer(chan);
1594
1595 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1596 l2cap_chan_unlock(chan);
1597 l2cap_chan_put(chan);
1598 }
1599
/* Retransmission timer expired: no acknowledgment arrived in time, so
 * poll the peer with an RR/RNR carrying the P-bit and start the
 * monitor timer to supervise the poll.  The work item owns a channel
 * reference which is released on exit.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	chan->retry_count = 1;
	__set_monitor_timer(chan);

	/* Expect an F-bit response to our poll. */
	set_bit(CONN_WAIT_F, &chan->conn_state);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1619
1620 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1621 {
1622 struct sk_buff *skb;
1623
1624 while ((skb = skb_peek(&chan->tx_q)) &&
1625 chan->unacked_frames) {
1626 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1627 break;
1628
1629 skb = skb_dequeue(&chan->tx_q);
1630 kfree_skb(skb);
1631
1632 chan->unacked_frames--;
1633 }
1634
1635 if (!chan->unacked_frames)
1636 __clear_retrans_timer(chan);
1637 }
1638
/* Transmit every queued frame in streaming mode.  Each frame gets a
 * fresh TxSeq and its SAR bits patched into the control field, plus a
 * recomputed FCS when CRC16 is configured.  Streaming mode keeps no
 * copy for retransmission, so frames are dequeued permanently.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u32 control;
	u16 fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		/* Patch TxSeq/SAR into the pre-allocated control field. */
		control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
		control |= __set_txseq(chan, chan->next_tx_seq);
		control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		__put_control(chan, control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the whole frame minus the FCS field. */
			fcs = crc16(0, (u8 *)skb->data,
				    skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs,
					   skb->data + skb->len - L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
	}
}
1663
1664 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1665 {
1666 struct sk_buff *skb, *tx_skb;
1667 u16 fcs;
1668 u32 control;
1669
1670 skb = skb_peek(&chan->tx_q);
1671 if (!skb)
1672 return;
1673
1674 while (bt_cb(skb)->control.txseq != tx_seq) {
1675 if (skb_queue_is_last(&chan->tx_q, skb))
1676 return;
1677
1678 skb = skb_queue_next(&chan->tx_q, skb);
1679 }
1680
1681 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1682 chan->remote_max_tx) {
1683 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1684 return;
1685 }
1686
1687 tx_skb = skb_clone(skb, GFP_ATOMIC);
1688 bt_cb(skb)->control.retries++;
1689
1690 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1691 control &= __get_sar_mask(chan);
1692
1693 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1694 control |= __set_ctrl_final(chan);
1695
1696 control |= __set_reqseq(chan, chan->buffer_seq);
1697 control |= __set_txseq(chan, tx_seq);
1698
1699 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1700
1701 if (chan->fcs == L2CAP_FCS_CRC16) {
1702 fcs = crc16(0, (u8 *)tx_skb->data,
1703 tx_skb->len - L2CAP_FCS_SIZE);
1704 put_unaligned_le16(fcs,
1705 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1706 }
1707
1708 l2cap_do_send(chan, tx_skb);
1709 }
1710
1711 static int l2cap_ertm_send(struct l2cap_chan *chan)
1712 {
1713 struct sk_buff *skb, *tx_skb;
1714 u16 fcs;
1715 u32 control;
1716 int nsent = 0;
1717
1718 if (chan->state != BT_CONNECTED)
1719 return -ENOTCONN;
1720
1721 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1722 return 0;
1723
1724 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1725
1726 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1727 chan->remote_max_tx) {
1728 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1729 break;
1730 }
1731
1732 tx_skb = skb_clone(skb, GFP_ATOMIC);
1733
1734 bt_cb(skb)->control.retries++;
1735
1736 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1737 control &= __get_sar_mask(chan);
1738
1739 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1740 control |= __set_ctrl_final(chan);
1741
1742 control |= __set_reqseq(chan, chan->buffer_seq);
1743 control |= __set_txseq(chan, chan->next_tx_seq);
1744 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1745
1746 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1747
1748 if (chan->fcs == L2CAP_FCS_CRC16) {
1749 fcs = crc16(0, (u8 *)skb->data,
1750 tx_skb->len - L2CAP_FCS_SIZE);
1751 put_unaligned_le16(fcs, skb->data +
1752 tx_skb->len - L2CAP_FCS_SIZE);
1753 }
1754
1755 l2cap_do_send(chan, tx_skb);
1756
1757 __set_retrans_timer(chan);
1758
1759 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1760
1761 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1762
1763 if (bt_cb(skb)->control.retries == 1) {
1764 chan->unacked_frames++;
1765
1766 if (!nsent++)
1767 __clear_ack_timer(chan);
1768 }
1769
1770 chan->frames_sent++;
1771
1772 if (skb_queue_is_last(&chan->tx_q, skb))
1773 chan->tx_send_head = NULL;
1774 else
1775 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1776 }
1777
1778 return nsent;
1779 }
1780
1781 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1782 {
1783 int ret;
1784
1785 if (!skb_queue_empty(&chan->tx_q))
1786 chan->tx_send_head = chan->tx_q.next;
1787
1788 chan->next_tx_seq = chan->expected_ack_seq;
1789 ret = l2cap_ertm_send(chan);
1790 return ret;
1791 }
1792
1793 static void __l2cap_send_ack(struct l2cap_chan *chan)
1794 {
1795 u32 control = 0;
1796
1797 control |= __set_reqseq(chan, chan->buffer_seq);
1798
1799 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1800 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1801 set_bit(CONN_RNR_SENT, &chan->conn_state);
1802 l2cap_send_sframe(chan, control);
1803 return;
1804 }
1805
1806 if (l2cap_ertm_send(chan) > 0)
1807 return;
1808
1809 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1810 l2cap_send_sframe(chan, control);
1811 }
1812
/* Send an acknowledgment now and cancel the delayed-ack timer, whose
 * pending ack would be redundant.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1818
1819 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1820 {
1821 struct srej_list *tail;
1822 u32 control;
1823
1824 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1825 control |= __set_ctrl_final(chan);
1826
1827 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1828 control |= __set_reqseq(chan, tail->tx_seq);
1829
1830 l2cap_send_sframe(chan, control);
1831 }
1832
/* Copy len bytes of user data from msg into skb: the first count bytes
 * go into skb's linear area, anything beyond that into continuation
 * fragments (each at most conn->mtu bytes) chained on skb's frag_list.
 * Returns the number of bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link the fragment before copying, so a later failure
		 * is cleaned up by the caller's kfree_skb(skb).
		 */
		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len -= count;

		/* Keep the parent skb's length bookkeeping in sync. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1877
/* Build a connectionless PDU: L2CAP header, 2-byte PSM, then the
 * payload copied from msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	/* The first skb is capped at the HCI MTU; the remainder goes
	 * into continuation fragments in l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1911
/* Build a basic-mode PDU: plain L2CAP header followed by the payload
 * copied from msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	/* First skb is capped at the HCI MTU; the remainder goes into
	 * continuation fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1944
/* Build an ERTM/streaming I-frame: L2CAP header, zeroed control field
 * (patched at transmit time), optional SDU length (first frame of a
 * segmented SDU), payload, and a placeholder FCS when CRC16 is in use.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* The extended control field needs a larger header. */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Reserve the control field; the real value is set on send. */
	__put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; computed when the frame is transmitted. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));

	bt_cb(skb)->control.retries = 0;
	return skb;
}
1999
/* Split an SDU from user space into ERTM/streaming PDUs queued on
 * seg_queue.  A single-PDU SDU is marked UNSEGMENTED; otherwise frames
 * are tagged START/CONTINUE/END and the START frame carries the total
 * SDU length.  Returns 0 on success or a negative errno, in which case
 * seg_queue has been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* The START frame also carries the 2-byte SDU length. */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Frames after START reclaim the SDU-length bytes. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2065
/* Entry point for transmitting user data on a channel.  Dispatches on
 * channel type and mode: connectionless channels and basic mode build
 * and send one PDU directly; ERTM/streaming segment the SDU first and
 * hand the segments to the transmit machinery.  Returns the number of
 * bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		/* Splice the segments onto tx_q; in ERTM, point the send
		 * head at the first new frame if the queue was idle.
		 */
		if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
			chan->tx_send_head = seg_queue.next;
		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_ertm_send(chan);
		else
			l2cap_streaming_send(chan);

		if (err >= 0)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2150
2151 /* Copy frame to all raw sockets on that connection */
2152 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2153 {
2154 struct sk_buff *nskb;
2155 struct l2cap_chan *chan;
2156
2157 BT_DBG("conn %p", conn);
2158
2159 mutex_lock(&conn->chan_lock);
2160
2161 list_for_each_entry(chan, &conn->chan_l, list) {
2162 struct sock *sk = chan->sk;
2163 if (chan->chan_type != L2CAP_CHAN_RAW)
2164 continue;
2165
2166 /* Don't send frame to the socket it came from */
2167 if (skb->sk == sk)
2168 continue;
2169 nskb = skb_clone(skb, GFP_ATOMIC);
2170 if (!nskb)
2171 continue;
2172
2173 if (chan->ops->recv(chan->data, nskb))
2174 kfree_skb(nskb);
2175 }
2176
2177 mutex_unlock(&conn->chan_lock);
2178 }
2179
2180 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header on the (LE) signalling CID,
 * command header, then dlen bytes of data.  Data beyond one HCI MTU
 * spills into continuation fragments on frag_list.  Returns NULL on
 * allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use their own dedicated signalling channel. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever fits after the headers goes in the first skb. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including fragments linked so far. */
	kfree_skb(skb);
	return NULL;
}
2243
/* Decode one configuration option at *ptr: report its type, length and
 * value, and advance *ptr past the option.  1/2/4-byte values are read
 * little-endian; any other length is returned as a pointer into the
 * buffer.  Returns the total size of the option.
 *
 * NOTE(review): opt->len comes off the wire and is not checked against
 * the remaining buffer length here -- callers must have validated the
 * overall command length.  TODO confirm all callers do.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Opaque value: hand back a pointer into the buffer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2276
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored little-endian;
 * any other length treats val as a pointer to a byte blob.
 *
 * NOTE(review): no bound on the output buffer is enforced here;
 * callers are expected to size their buffers for the worst case.
 * TODO confirm.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2306
/* Append an Extended Flow Specification option describing the
 * channel's local parameters.  ERTM advertises the configured service
 * type and default latency/flush values; streaming always advertises
 * best-effort with no latency or flush constraints.  Any other mode
 * adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
2337
/* Delayed-ack timer fired: send the pending acknowledgment now.  The
 * work item owns a channel reference which is released on exit.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	__l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	l2cap_chan_put(chan);
}
2353
/* Reset sequence-number and reassembly state for a newly configured
 * channel.  For ERTM additionally initialise the RX/TX state machines,
 * the retransmission/monitor/ack timers and the SREJ bookkeeping.
 * Returns 0 on success or a negative errno from sequence-list setup.
 *
 * NOTE(review): if the second l2cap_seq_list_init() fails, srej_list
 * is not freed here -- presumably released on channel destroy; verify
 * against the channel teardown path.
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs none of the ERTM machinery below. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
}
2391
2392 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2393 {
2394 switch (mode) {
2395 case L2CAP_MODE_STREAMING:
2396 case L2CAP_MODE_ERTM:
2397 if (l2cap_mode_supported(mode, remote_feat_mask))
2398 return mode;
2399 /* fall through */
2400 default:
2401 return L2CAP_MODE_BASIC;
2402 }
2403 }
2404
2405 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2406 {
2407 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2408 }
2409
2410 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2411 {
2412 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2413 }
2414
2415 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2416 {
2417 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2418 __l2cap_ews_supported(chan)) {
2419 /* use extended control field */
2420 set_bit(FLAG_EXT_CTRL, &chan->flags);
2421 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2422 } else {
2423 chan->tx_win = min_t(u16, chan->tx_win,
2424 L2CAP_DEFAULT_TX_WINDOW);
2425 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2426 }
2427 }
2428
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the first request the channel mode is negotiated (falling back to
 * Basic mode when the remote's feature mask lacks ERTM/Streaming
 * support); then the MTU, RFC, EFS, FCS and EWS options are appended
 * as applicable for the selected mode.
 *
 * Returns the number of bytes written into @data.
 *
 * NOTE(review): options are appended through @ptr with no explicit
 * bound on the caller's buffer; callers size their buffers (64/128
 * bytes) to fit the largest request — confirm before adding options.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only once, before any request or
	 * response has been exchanged. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: mode is locally mandated and must
		 * not be downgraded. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default incoming MTU. */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* RFC option is meaningless if the remote supports
		 * neither ERTM nor Streaming mode. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are filled in by the responder. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Largest PDU that still fits in the ACL MTU with the
		 * extended header, SDU length and FCS accounted for. */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		/* Full (unclamped) window goes in the EWS option. */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2549
/* Parse the accumulated Configure Request (chan->conf_req/conf_len)
 * and build the Configure Response into @data.
 *
 * First pass collects the remote's proposed options; then the channel
 * mode is validated against the proposal and the response options
 * (MTU, RFC, EFS) are emitted with an appropriate result code.
 *
 * Returns the number of bytes written into @data, or -ECONNREFUSED
 * when the proposal cannot be accepted at all.
 *
 * NOTE(review): response options are appended through @ptr with no
 * bound on the caller's buffer (callers pass a 64-byte stack array);
 * a request packed with unknown option types grows the response —
 * verify the worst case fits before adding new emitted options.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Pass 1: walk the option list and record what the remote asked
	 * for.  Unknown hint options are ignored; unknown non-hint
	 * options are echoed back with CONF_UNKNOWN. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			/* Option length must match; otherwise keep the
			 * Basic-mode default. */
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);

			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* EWS requires High Speed support locally. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode renegotiation is only allowed early in the exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Locally mandated mode: the remote must match it. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS the window came from the EWS option,
			 * not from the RFC option. */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's MPS to what fits in our
			 * ACL MTU. */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2769
/* Parse a Configure Response (@rsp, @len bytes of options) and build
 * the follow-up Configure Request into @data, adopting the values the
 * remote proposed where acceptable.
 *
 * @result may be downgraded to L2CAP_CONF_UNACCEPT (MTU too small).
 * Returns the number of bytes written into @data, or -ECONNREFUSED
 * when the response conflicts with a locally mandated mode or EFS
 * service type.
 *
 * NOTE(review): options are appended to the caller-supplied buffer
 * through @ptr with no length bound; callers pass 64-byte stack
 * buffers, so an option-heavy response can overrun them — this is the
 * pattern later hardened upstream (CVE-2017-1000251); a fix requires
 * threading a buffer size through this and all callers.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse an MTU below the spec minimum but keep
			 * negotiating with the minimum instead. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A locally mandated mode cannot be changed by
			 * the remote. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->tx_win = min_t(u16, val,
					L2CAP_DEFAULT_EXT_WINDOW);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the remote's response. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2868
2869 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2870 {
2871 struct l2cap_conf_rsp *rsp = data;
2872 void *ptr = rsp->data;
2873
2874 BT_DBG("chan %p", chan);
2875
2876 rsp->scid = cpu_to_le16(chan->dcid);
2877 rsp->result = cpu_to_le16(result);
2878 rsp->flags = cpu_to_le16(flags);
2879
2880 return ptr - data;
2881 }
2882
/* Complete a deferred connection setup: send the successful Connection
 * Response that was held back (deferred-setup / pending authorization),
 * then kick off configuration if we have not done so already.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	/* chan->ident still holds the ident of the original request. */
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* test_and_set ensures only one Configure Request is initiated. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2903
/* Extract the RFC option from a successful Configure Response (@rsp,
 * @len bytes of options) and apply its timeouts/MPS to @chan.
 *
 * Only meaningful for ERTM and Streaming channels.  If the remote did
 * not include a (well-formed) RFC option, sane local defaults are used
 * instead so the channel still operates.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		if (type != L2CAP_CONF_RFC)
			continue;

		/* Malformed option length: stop and fall back to the
		 * defaults below. */
		if (olen != sizeof(rfc))
			break;

		memcpy(&rfc, (void *)val, olen);
		goto done;
	}

	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC option.
	 */
	rfc.mode = chan->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(chan->imtu);

	BT_ERR("Expected RFC option was not found, using defaults");

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
2949
2950 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2951 {
2952 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2953
2954 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2955 return 0;
2956
2957 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2958 cmd->ident == conn->info_ident) {
2959 cancel_delayed_work(&conn->info_timer);
2960
2961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2962 conn->info_ident = 0;
2963
2964 l2cap_conn_start(conn);
2965 }
2966
2967 return 0;
2968 }
2969
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, enforces link
 * security (except for SDP), allocates the new channel, and answers
 * with SUCCESS / PEND / an error result.  A feature-mask Information
 * Request is started if needed, and configuration begins immediately
 * on success.
 *
 * Locking: takes conn->chan_lock and the parent socket lock for the
 * channel-creation section; the response is sent after both are
 * released.  Always returns 0 (errors are reported via rsp.result).
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our local CID becomes the remote's destination CID. */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			/* Deferred setup: stay pending until userspace
			 * accepts the connection. */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not yet known: answer PEND and query it. */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Connection accepted: start configuration right away. */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3099
/* Handle an incoming Connection Response for a channel we initiated.
 *
 * The channel is found by our source CID (or, while still pending, by
 * the command ident).  SUCCESS moves the channel to BT_CONFIG and
 * starts configuration; PEND just records the pending state; any other
 * result tears the channel down.
 *
 * Returns 0 on success, -EFAULT when no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		/* A pending response may carry scid 0; match on ident. */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Don't start configuration twice. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3167
3168 static inline void set_default_fcs(struct l2cap_chan *chan)
3169 {
3170 /* FCS is enabled only in ERTM or streaming mode, if one or both
3171 * sides request it.
3172 */
3173 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3174 chan->fcs = L2CAP_FCS_NONE;
3175 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3176 chan->fcs = L2CAP_FCS_CRC16;
3177 }
3178
/* Handle an incoming Configure Request.
 *
 * Request options may arrive fragmented (continuation flag set); they
 * are accumulated in chan->conf_req until the final fragment, then
 * parsed and answered.  When both directions are configured the channel
 * goes BT_CONNECTED and ERTM/Streaming state is initialised.
 *
 * Returns 0, -ENOENT (unknown destination CID) or an l2cap_ertm_init()
 * error.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We answered first: send our own Configure Request now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0000), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3288
/* Handle an incoming Configure Response.
 *
 * SUCCESS applies the agreed RFC values; PENDING completes our side of
 * an EFS-pending exchange; UNACCEPT renegotiates with the remote's
 * proposed values (up to L2CAP_CONF_MAX_CONF_RSP attempts) and any
 * other result disconnects the channel.  When both directions finish,
 * the channel goes BT_CONNECTED.
 *
 * Returns 0 or an l2cap_ertm_init() error.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
			result, len);

	/* Returns with the channel locked on success. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
						L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Renegotiation attempts exhausted.
		 * fall through */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More option fragments follow; wait for them. */
	if (flags & 0x01)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);
		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3395
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down and remove the channel.
 *
 * A reference is held across l2cap_chan_del() so the channel stays
 * valid for ops->close() after it has been unlinked; close() is called
 * outside the channel lock.  Always returns 0 (an unknown CID is
 * silently ignored).
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The remote's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Keep the channel alive for ops->close() below. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3441
/* Handle an incoming Disconnection Response: the remote confirmed our
 * disconnect request, so remove the channel.
 *
 * Same hold/del/close pattern as l2cap_disconnect_req(): the extra
 * reference keeps the channel valid for ops->close() after unlinking.
 * Always returns 0 (an unknown CID is silently ignored).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Keep the channel alive for ops->close() below. */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3475
/* Handle an incoming Information Request.
 *
 * Answers feature-mask and fixed-channel queries with our advertised
 * capabilities (extended features only when ERTM/High Speed are
 * enabled); any other type gets NOTSUPP.  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		if (enable_hs)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
						| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* A2MP fixed channel is advertised only with HS on. */
		if (enable_hs)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3525
/* Handle an incoming Information Response.
 *
 * Stores the remote's feature mask (chaining a fixed-channel query if
 * supported) or fixed-channel mask, then starts any channels that were
 * waiting for this information.  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Query failed: give up and proceed without the info. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before declaring
			 * the exchange done. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
3583
3584 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3585 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3586 void *data)
3587 {
3588 struct l2cap_create_chan_req *req = data;
3589 struct l2cap_create_chan_rsp rsp;
3590 u16 psm, scid;
3591
3592 if (cmd_len != sizeof(*req))
3593 return -EPROTO;
3594
3595 if (!enable_hs)
3596 return -EINVAL;
3597
3598 psm = le16_to_cpu(req->psm);
3599 scid = le16_to_cpu(req->scid);
3600
3601 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3602
3603 /* Placeholder: Always reject */
3604 rsp.dcid = 0;
3605 rsp.scid = cpu_to_le16(scid);
3606 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3607 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3608
3609 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3610 sizeof(rsp), &rsp);
3611
3612 return 0;
3613 }
3614
/* A Create Channel response carries the same payload layout as a
 * Connect response, so reuse that handler verbatim.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	int err;

	BT_DBG("conn %p", conn);

	err = l2cap_connect_rsp(conn, cmd, data);

	return err;
}
3622
3623 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3624 u16 icid, u16 result)
3625 {
3626 struct l2cap_move_chan_rsp rsp;
3627
3628 BT_DBG("icid %d, result %d", icid, result);
3629
3630 rsp.icid = cpu_to_le16(icid);
3631 rsp.result = cpu_to_le16(result);
3632
3633 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3634 }
3635
3636 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3637 struct l2cap_chan *chan, u16 icid, u16 result)
3638 {
3639 struct l2cap_move_chan_cfm cfm;
3640 u8 ident;
3641
3642 BT_DBG("icid %d, result %d", icid, result);
3643
3644 ident = l2cap_get_ident(conn);
3645 if (chan)
3646 chan->ident = ident;
3647
3648 cfm.icid = cpu_to_le16(icid);
3649 cfm.result = cpu_to_le16(result);
3650
3651 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3652 }
3653
3654 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3655 u16 icid)
3656 {
3657 struct l2cap_move_chan_cfm_rsp rsp;
3658
3659 BT_DBG("icid %d", icid);
3660
3661 rsp.icid = cpu_to_le16(icid);
3662 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3663 }
3664
3665 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3666 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3667 {
3668 struct l2cap_move_chan_req *req = data;
3669 u16 icid = 0;
3670 u16 result = L2CAP_MR_NOT_ALLOWED;
3671
3672 if (cmd_len != sizeof(*req))
3673 return -EPROTO;
3674
3675 icid = le16_to_cpu(req->icid);
3676
3677 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3678
3679 if (!enable_hs)
3680 return -EINVAL;
3681
3682 /* Placeholder: Always refuse */
3683 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3684
3685 return 0;
3686 }
3687
3688 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3689 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3690 {
3691 struct l2cap_move_chan_rsp *rsp = data;
3692 u16 icid, result;
3693
3694 if (cmd_len != sizeof(*rsp))
3695 return -EPROTO;
3696
3697 icid = le16_to_cpu(rsp->icid);
3698 result = le16_to_cpu(rsp->result);
3699
3700 BT_DBG("icid %d, result %d", icid, result);
3701
3702 /* Placeholder: Always unconfirmed */
3703 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3704
3705 return 0;
3706 }
3707
3708 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3709 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3710 {
3711 struct l2cap_move_chan_cfm *cfm = data;
3712 u16 icid, result;
3713
3714 if (cmd_len != sizeof(*cfm))
3715 return -EPROTO;
3716
3717 icid = le16_to_cpu(cfm->icid);
3718 result = le16_to_cpu(cfm->result);
3719
3720 BT_DBG("icid %d, result %d", icid, result);
3721
3722 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3723
3724 return 0;
3725 }
3726
3727 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3728 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3729 {
3730 struct l2cap_move_chan_cfm_rsp *rsp = data;
3731 u16 icid;
3732
3733 if (cmd_len != sizeof(*rsp))
3734 return -EPROTO;
3735
3736 icid = le16_to_cpu(rsp->icid);
3737
3738 BT_DBG("icid %d", icid);
3739
3740 return 0;
3741 }
3742
3743 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3744 u16 to_multiplier)
3745 {
3746 u16 max_latency;
3747
3748 if (min > max || min < 6 || max > 3200)
3749 return -EINVAL;
3750
3751 if (to_multiplier < 10 || to_multiplier > 3200)
3752 return -EINVAL;
3753
3754 if (max >= to_multiplier * 8)
3755 return -EINVAL;
3756
3757 max_latency = (to_multiplier * 8 / max) - 1;
3758 if (latency > 499 || latency > max_latency)
3759 return -EINVAL;
3760
3761 return 0;
3762 }
3763
3764 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3765 struct l2cap_cmd_hdr *cmd, u8 *data)
3766 {
3767 struct hci_conn *hcon = conn->hcon;
3768 struct l2cap_conn_param_update_req *req;
3769 struct l2cap_conn_param_update_rsp rsp;
3770 u16 min, max, latency, to_multiplier, cmd_len;
3771 int err;
3772
3773 if (!(hcon->link_mode & HCI_LM_MASTER))
3774 return -EINVAL;
3775
3776 cmd_len = __le16_to_cpu(cmd->len);
3777 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3778 return -EPROTO;
3779
3780 req = (struct l2cap_conn_param_update_req *) data;
3781 min = __le16_to_cpu(req->min);
3782 max = __le16_to_cpu(req->max);
3783 latency = __le16_to_cpu(req->latency);
3784 to_multiplier = __le16_to_cpu(req->to_multiplier);
3785
3786 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3787 min, max, latency, to_multiplier);
3788
3789 memset(&rsp, 0, sizeof(rsp));
3790
3791 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3792 if (err)
3793 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3794 else
3795 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3796
3797 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3798 sizeof(rsp), &rsp);
3799
3800 if (!err)
3801 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3802
3803 return 0;
3804 }
3805
3806 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3807 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3808 {
3809 int err = 0;
3810
3811 switch (cmd->code) {
3812 case L2CAP_COMMAND_REJ:
3813 l2cap_command_rej(conn, cmd, data);
3814 break;
3815
3816 case L2CAP_CONN_REQ:
3817 err = l2cap_connect_req(conn, cmd, data);
3818 break;
3819
3820 case L2CAP_CONN_RSP:
3821 err = l2cap_connect_rsp(conn, cmd, data);
3822 break;
3823
3824 case L2CAP_CONF_REQ:
3825 err = l2cap_config_req(conn, cmd, cmd_len, data);
3826 break;
3827
3828 case L2CAP_CONF_RSP:
3829 err = l2cap_config_rsp(conn, cmd, data);
3830 break;
3831
3832 case L2CAP_DISCONN_REQ:
3833 err = l2cap_disconnect_req(conn, cmd, data);
3834 break;
3835
3836 case L2CAP_DISCONN_RSP:
3837 err = l2cap_disconnect_rsp(conn, cmd, data);
3838 break;
3839
3840 case L2CAP_ECHO_REQ:
3841 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3842 break;
3843
3844 case L2CAP_ECHO_RSP:
3845 break;
3846
3847 case L2CAP_INFO_REQ:
3848 err = l2cap_information_req(conn, cmd, data);
3849 break;
3850
3851 case L2CAP_INFO_RSP:
3852 err = l2cap_information_rsp(conn, cmd, data);
3853 break;
3854
3855 case L2CAP_CREATE_CHAN_REQ:
3856 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3857 break;
3858
3859 case L2CAP_CREATE_CHAN_RSP:
3860 err = l2cap_create_channel_rsp(conn, cmd, data);
3861 break;
3862
3863 case L2CAP_MOVE_CHAN_REQ:
3864 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3865 break;
3866
3867 case L2CAP_MOVE_CHAN_RSP:
3868 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3869 break;
3870
3871 case L2CAP_MOVE_CHAN_CFM:
3872 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3873 break;
3874
3875 case L2CAP_MOVE_CHAN_CFM_RSP:
3876 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3877 break;
3878
3879 default:
3880 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3881 err = -EINVAL;
3882 break;
3883 }
3884
3885 return err;
3886 }
3887
3888 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3889 struct l2cap_cmd_hdr *cmd, u8 *data)
3890 {
3891 switch (cmd->code) {
3892 case L2CAP_COMMAND_REJ:
3893 return 0;
3894
3895 case L2CAP_CONN_PARAM_UPDATE_REQ:
3896 return l2cap_conn_param_update_req(conn, cmd, data);
3897
3898 case L2CAP_CONN_PARAM_UPDATE_RSP:
3899 return 0;
3900
3901 default:
3902 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3903 return -EINVAL;
3904 }
3905 }
3906
/* Parse and dispatch every signaling command carried in one C-frame;
 * a single skb may hold several commands back to back.  Consumes skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Mirror raw signaling traffic to raw listeners first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* The payload must fit in the remaining frame and the
		 * identifier must be non-zero; otherwise stop parsing.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Step over this command's payload to the next one. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3953
3954 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3955 {
3956 u16 our_fcs, rcv_fcs;
3957 int hdr_size;
3958
3959 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3960 hdr_size = L2CAP_EXT_HDR_SIZE;
3961 else
3962 hdr_size = L2CAP_ENH_HDR_SIZE;
3963
3964 if (chan->fcs == L2CAP_FCS_CRC16) {
3965 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3966 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3967 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3968
3969 if (our_fcs != rcv_fcs)
3970 return -EBADMSG;
3971 }
3972 return 0;
3973 }
3974
3975 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3976 {
3977 u32 control = 0;
3978
3979 chan->frames_sent = 0;
3980
3981 control |= __set_reqseq(chan, chan->buffer_seq);
3982
3983 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3984 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3985 l2cap_send_sframe(chan, control);
3986 set_bit(CONN_RNR_SENT, &chan->conn_state);
3987 }
3988
3989 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3990 l2cap_retransmit_frames(chan);
3991
3992 l2cap_ertm_send(chan);
3993
3994 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3995 chan->frames_sent == 0) {
3996 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3997 l2cap_send_sframe(chan, control);
3998 }
3999 }
4000
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue ordered by each frame's offset from buffer_seq.
 * Returns -EINVAL for a duplicate tx_seq, 0 otherwise (queue takes
 * ownership of skb on success).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->control.txseq = tx_seq;
	bt_cb(skb)->control.sar = sar;

	next_skb = skb_peek(&chan->srej_q);

	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	while (next_skb) {
		if (bt_cb(next_skb)->control.txseq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = __seq_offset(chan,
				bt_cb(next_skb)->control.txseq, chan->buffer_seq);

		/* First queued frame with a larger offset: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			next_skb = NULL;
		else
			next_skb = skb_queue_next(&chan->srej_q, next_skb);
	}

	/* Largest offset seen (or empty queue): append at the tail. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
4035
/* Chain new_frag onto skb's frag_list via *last_frag and account for
 * its length in the parent skb's counters.
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): on the first fragment *last_frag is the parent skb
	 * itself (see the SAR_START case in l2cap_reassemble_sdu), so this
	 * also writes skb->next; that appears to rely on the parent not
	 * sitting on any queue at this point -- confirm.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4054
/* Feed one received payload into SDU reassembly according to its SAR
 * bits and deliver complete SDUs via chan->ops->recv.  On every path
 * that stores or hands off skb, the local skb is set to NULL so that
 * the error cleanup at the bottom only frees what is still owned here.
 * Returns 0 or a negative error; on error both the current skb and any
 * partial SDU are freed.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
{
	int err = -EINVAL;

	switch (__get_ctrl_sar(chan, control)) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU while another is in progress is a protocol
		 * error (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan->data, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first segment starts with the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already contain the full SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
						&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
						&chan->sdu_last_frag);
		skb = NULL;

		/* The end segment must complete the SDU exactly. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan->data, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4135
/* Enter the ERTM local-busy state: flag the connection, discard any
 * outstanding SREJ requests and schedule an acknowledgement.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	l2cap_seq_list_clear(&chan->srej_list);

	__set_ack_timer(chan);
}
4145
4146 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4147 {
4148 u32 control;
4149
4150 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4151 goto done;
4152
4153 control = __set_reqseq(chan, chan->buffer_seq);
4154 control |= __set_ctrl_poll(chan);
4155 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4156 l2cap_send_sframe(chan, control);
4157 chan->retry_count = 1;
4158
4159 __clear_retrans_timer(chan);
4160 __set_monitor_timer(chan);
4161
4162 set_bit(CONN_WAIT_F, &chan->conn_state);
4163
4164 done:
4165 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4166 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4167
4168 BT_DBG("chan %p, Exit local busy", chan);
4169 }
4170
4171 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4172 {
4173 if (chan->mode == L2CAP_MODE_ERTM) {
4174 if (busy)
4175 l2cap_ertm_enter_local_busy(chan);
4176 else
4177 l2cap_ertm_exit_local_busy(chan);
4178 }
4179 }
4180
/* Deliver frames from the (ordered) SREJ queue that have become
 * in-sequence, starting at tx_seq, until a gap is reached or we go
 * locally busy.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
{
	struct sk_buff *skb;
	u32 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* Queue is ordered, so the first mismatch is a gap. */
		if (bt_cb(skb)->control.txseq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
		err = l2cap_reassemble_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
		tx_seq = __next_seq(chan, tx_seq);
	}
}
4206
4207 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4208 {
4209 struct srej_list *l, *tmp;
4210 u32 control;
4211
4212 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4213 if (l->tx_seq == tx_seq) {
4214 list_del(&l->list);
4215 kfree(l);
4216 return;
4217 }
4218 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4219 control |= __set_reqseq(chan, l->tx_seq);
4220 l2cap_send_sframe(chan, control);
4221 list_del(&l->list);
4222 list_add_tail(&l->list, &chan->srej_l);
4223 }
4224 }
4225
/* Send an SREJ for every missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each request in
 * both srej_list (sequence list) and srej_l (allocated list entries),
 * then advance expected_tx_seq past the frame that actually arrived.
 * Returns 0 or -ENOMEM.
 */
static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
{
	struct srej_list *new;
	u32 control;

	while (tx_seq != chan->expected_tx_seq) {
		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
		control |= __set_reqseq(chan, chan->expected_tx_seq);
		l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
		l2cap_send_sframe(chan, control);

		/* NOTE(review): the SREJ has already been transmitted when
		 * this allocation fails, leaving srej_list and srej_l out of
		 * sync on -ENOMEM; the callers disconnect the channel in
		 * that case, which masks it -- confirm before relying on
		 * this path otherwise.
		 */
		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;

		new->tx_seq = chan->expected_tx_seq;

		chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

		list_add_tail(&new->list, &chan->srej_l);
	}

	/* Step over the frame that did arrive. */
	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	return 0;
}
4252
/* ERTM I-frame receive path: acknowledge what the frame's req_seq
 * covers, then either deliver the in-sequence payload or run the SREJ
 * recovery machinery for out-of-sequence frames.  Takes ownership of
 * skb.  Returns 0, or a negative error after requesting disconnect.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	u16 tx_seq = __get_txseq(chan, rx_control);
	u16 req_seq = __get_reqseq(chan, rx_control);
	u8 sar = __get_ctrl_sar(chan, rx_control);
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
							tx_seq, rx_control);

	/* F-bit answers our earlier poll: stop the monitor timer and
	 * fall back to the retransmission timer if frames are pending.
	 */
	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* While locally busy, drop everything; keep acking until the
	 * RNR has actually been sent.
	 */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		goto drop;
	}

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: queue it and
			 * flush any now-in-sequence frames behind it.
			 */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			/* Already requested: just resend the SREJs. */
			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}

			err = l2cap_send_srejframe(chan, tx_seq);
			if (err < 0) {
				l2cap_send_disconn_req(chan->conn, chan, -err);
				return err;
			}
		}
	} else {
		expected_tx_seq_offset = __seq_offset(chan,
				chan->expected_tx_seq, chan->buffer_seq);

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First gap detected: enter SREJ recovery. */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		/* Set P-bit only if there are some I-frames to ack. */
		if (__clear_ack_timer(chan))
			set_bit(CONN_SEND_PBIT, &chan->conn_state);

		err = l2cap_send_srejframe(chan, tx_seq);
		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, -err);
			return err;
		}
	}
	return 0;

expected:
	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);

	/* During SREJ recovery, in-sequence frames are buffered and
	 * delivered once the gaps are filled.
	 */
	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		bt_cb(skb)->control.txseq = tx_seq;
		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_reassemble_sdu(chan, skb, rx_control);
	chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (__is_ctrl_final(chan, rx_control)) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}


	/* Ack immediately every num_to_ack frames, otherwise defer. */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);
	else
		__set_ack_timer(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4397
/* Handle a Receiver Ready S-frame: acknowledge covered I-frames and
 * react to the P/F bits.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
				__get_reqseq(chan, rx_control), rx_control);

	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control)) {
		/* Peer polls us: our reply must carry the F-bit. */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (__is_ctrl_final(chan, rx_control)) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		/* F-bit: retransmit unless a REJ recovery already did. */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
4437
4438 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4439 {
4440 u16 tx_seq = __get_reqseq(chan, rx_control);
4441
4442 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4443
4444 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4445
4446 chan->expected_ack_seq = tx_seq;
4447 l2cap_drop_acked_frames(chan);
4448
4449 if (__is_ctrl_final(chan, rx_control)) {
4450 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4451 l2cap_retransmit_frames(chan);
4452 } else {
4453 l2cap_retransmit_frames(chan);
4454
4455 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4456 set_bit(CONN_REJ_ACT, &chan->conn_state);
4457 }
4458 }
/* Handle a Selective Reject S-frame: retransmit the single requested
 * frame, with P/F-bit handling mirroring the REJ logic.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (__is_ctrl_poll(chan, rx_control)) {
		/* SREJ with P-bit also acknowledges up to tx_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (__is_ctrl_final(chan, rx_control)) {
		/* Skip the retransmit if this SREJ was already acted on. */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
4494
/* Handle a Receiver Not Ready S-frame: mark the peer busy, acknowledge
 * up to req_seq, and answer any poll appropriately.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
{
	u16 tx_seq = __get_reqseq(chan, rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (__is_ctrl_poll(chan, rx_control))
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* Peer cannot receive, so stop retransmitting. */
		__clear_retrans_timer(chan);
		if (__is_ctrl_poll(chan, rx_control))
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery, a poll is answered by re-requesting the
	 * outstanding frames; otherwise just signal readiness.
	 */
	if (__is_ctrl_poll(chan, rx_control)) {
		l2cap_send_srejtail(chan);
	} else {
		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
		l2cap_send_sframe(chan, rx_control);
	}
}
4522
/* Dispatch a received S-frame by supervisory type.  Always consumes
 * skb (S-frames carry no payload to keep) and returns 0.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);

	/* F-bit answers our poll: swap monitor timer back for the
	 * retransmission timer if frames are still unacknowledged.
	 */
	if (__is_ctrl_final(chan, rx_control) &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	switch (__get_ctrl_super(chan, rx_control)) {
	case L2CAP_SUPER_RR:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJ:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SREJ:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RNR:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
4556
/* Validate and route one received ERTM frame: check FCS, sanity-check
 * the payload length against the MPS and the req_seq against the send
 * window, then hand off to the I-frame or S-frame handler.  Takes
 * ownership of skb.  Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u32 control;
	u16 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	__unpack_control(chan, skb);

	control = __get_control(chan, skb->data);
	skb_pull(skb, __ctrl_size(chan));
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU-length prefix of start segments. */
	if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(chan, control);

	req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);

	next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
						chan->expected_ack_seq);

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (!__is_sframe(chan, control)) {
		/* I-frame: len went negative if the frame was shorter
		 * than its mandatory fields.
		 */
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4624
/* Deliver a data frame to the channel bound to scid, handling it
 * according to the channel's mode.  NOTE(review): the channel returned
 * by l2cap_get_chan_by_scid appears to be locked, since all exit paths
 * below go through l2cap_chan_unlock() -- confirm against its
 * definition.  Takes ownership of skb.  Always returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	u32 control;
	u16 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return 0;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* ERTM handler consumes the skb on every path. */
		l2cap_ertm_data_rcv(chan, skb);

		goto done;

	case L2CAP_MODE_STREAMING:
		control = __get_control(chan, skb->data);
		skb_pull(skb, __ctrl_size(chan));
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(chan, control))
			len -= L2CAP_SDULEN_SIZE;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= L2CAP_FCS_SIZE;

		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
			goto drop;

		tx_seq = __get_txseq(chan, control);

		if (chan->expected_tx_seq != tx_seq) {
			/* Frame(s) missing - must discard partial SDU */
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;

			/* TODO: Notify userland of missing data */
		}

		chan->expected_tx_seq = __next_seq(chan, tx_seq);

		/* Streaming mode only disconnects on oversized SDUs;
		 * other reassembly errors are silently absorbed.
		 */
		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);

	return 0;
}
4713
4714 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4715 {
4716 struct l2cap_chan *chan;
4717
4718 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4719 if (!chan)
4720 goto drop;
4721
4722 BT_DBG("chan %p, len %d", chan, skb->len);
4723
4724 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4725 goto drop;
4726
4727 if (chan->imtu < skb->len)
4728 goto drop;
4729
4730 if (!chan->ops->recv(chan->data, skb))
4731 return 0;
4732
4733 drop:
4734 kfree_skb(skb);
4735
4736 return 0;
4737 }
4738
4739 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4740 struct sk_buff *skb)
4741 {
4742 struct l2cap_chan *chan;
4743
4744 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4745 if (!chan)
4746 goto drop;
4747
4748 BT_DBG("chan %p, len %d", chan, skb->len);
4749
4750 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4751 goto drop;
4752
4753 if (chan->imtu < skb->len)
4754 goto drop;
4755
4756 if (!chan->ops->recv(chan->data, skb))
4757 return 0;
4758
4759 drop:
4760 kfree_skb(skb);
4761
4762 return 0;
4763 }
4764
/* Entry point for a complete L2CAP frame from HCI: validate the basic
 * header length and route the payload by destination CID.  Takes
 * ownership of skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* Header fields stay readable through lh after the pull. */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The announced length must match what actually arrived. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames prefix the payload with a PSM. */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
4808
4809 /* ---- L2CAP interface with lower layer (HCI) ---- */
4810
4811 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4812 {
4813 int exact = 0, lm1 = 0, lm2 = 0;
4814 struct l2cap_chan *c;
4815
4816 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4817
4818 /* Find listening sockets and check their link_mode */
4819 read_lock(&chan_list_lock);
4820 list_for_each_entry(c, &chan_list, global_l) {
4821 struct sock *sk = c->sk;
4822
4823 if (c->state != BT_LISTEN)
4824 continue;
4825
4826 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4827 lm1 |= HCI_LM_ACCEPT;
4828 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4829 lm1 |= HCI_LM_MASTER;
4830 exact++;
4831 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4832 lm2 |= HCI_LM_ACCEPT;
4833 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4834 lm2 |= HCI_LM_MASTER;
4835 }
4836 }
4837 read_unlock(&chan_list_lock);
4838
4839 return exact ? lm1 : lm2;
4840 }
4841
4842 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4843 {
4844 struct l2cap_conn *conn;
4845
4846 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4847
4848 if (!status) {
4849 conn = l2cap_conn_add(hcon, status);
4850 if (conn)
4851 l2cap_conn_ready(conn);
4852 } else
4853 l2cap_conn_del(hcon, bt_to_errno(status));
4854
4855 return 0;
4856 }
4857
4858 int l2cap_disconn_ind(struct hci_conn *hcon)
4859 {
4860 struct l2cap_conn *conn = hcon->l2cap_data;
4861
4862 BT_DBG("hcon %p", hcon);
4863
4864 if (!conn)
4865 return HCI_ERROR_REMOTE_USER_TERM;
4866 return conn->disc_reason;
4867 }
4868
4869 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4870 {
4871 BT_DBG("hcon %p reason %d", hcon, reason);
4872
4873 l2cap_conn_del(hcon, bt_to_errno(reason));
4874 return 0;
4875 }
4876
4877 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4878 {
4879 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4880 return;
4881
4882 if (encrypt == 0x00) {
4883 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4884 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4885 } else if (chan->sec_level == BT_SECURITY_HIGH)
4886 l2cap_chan_close(chan, ECONNREFUSED);
4887 } else {
4888 if (chan->sec_level == BT_SECURITY_MEDIUM)
4889 __clear_chan_timer(chan);
4890 }
4891 }
4892
/* HCI security callback: authentication/encryption on the ACL link has
 * changed (status reports success/failure, encrypt the new encryption
 * state).  Walk every channel on the connection and advance or abort
 * its connection state machine accordingly.
 *
 * Locking: takes conn->chan_lock for the channel walk and each channel's
 * own lock per iteration; the socket lock is taken only in the
 * BT_CONNECT2 branch around the socket-state update.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* On LE, successful encryption triggers SMP key
		 * distribution and ends the pairing security timeout.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* LE data channels become ready as soon as the link
			 * is encrypted; inherit the link's security level.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channels with a connect request still pending are dealt
		 * with when their connect response arrives.
		 */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			/* Established channel: unblock the socket and let
			 * l2cap_check_encryption() apply the security policy.
			 */
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed before our connect request went
			 * out: send it now, or arm the disconnect timer on
			 * failure.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection waiting on security: answer the
			 * peer's connect request with the appropriate result.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
							&bt_sk(sk)->flags)) {
					/* Userspace must authorize first:
					 * report "pending" and wake the
					 * listening parent socket.
					 */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and schedule
				 * disconnect.
				 */
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4993
/* HCI entry point for incoming ACL data.  Reassembles L2CAP frames that
 * the controller split across several ACL packets: a start fragment
 * (ACL_CONT clear) carries the Basic L2CAP header with the total frame
 * length, continuation fragments are appended into conn->rx_skb until
 * rx_len reaches zero, then the complete frame goes to
 * l2cap_recv_frame().  The input skb is always consumed.  Returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated; discard it and
		 * flag the connection as unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		/* Bytes still expected from continuation fragments. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the announced frame length. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* The original fragment skb is always freed here; complete frames
	 * returned early above after transferring ownership.
	 */
	kfree_skb(skb);
	return 0;
}
5085
5086 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5087 {
5088 struct l2cap_chan *c;
5089
5090 read_lock(&chan_list_lock);
5091
5092 list_for_each_entry(c, &chan_list, global_l) {
5093 struct sock *sk = c->sk;
5094
5095 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5096 batostr(&bt_sk(sk)->src),
5097 batostr(&bt_sk(sk)->dst),
5098 c->state, __le16_to_cpu(c->psm),
5099 c->scid, c->dcid, c->imtu, c->omtu,
5100 c->sec_level, c->mode);
5101 }
5102
5103 read_unlock(&chan_list_lock);
5104
5105 return 0;
5106 }
5107
/* debugfs open hook: bind the single-shot seq_file show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5112
/* File operations for the read-only "l2cap" debugfs entry. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
5121
5122 int __init l2cap_init(void)
5123 {
5124 int err;
5125
5126 err = l2cap_init_sockets();
5127 if (err < 0)
5128 return err;
5129
5130 if (bt_debugfs) {
5131 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5132 bt_debugfs, NULL, &l2cap_debugfs_fops);
5133 if (!l2cap_debugfs)
5134 BT_ERR("Failed to create L2CAP debug file");
5135 }
5136
5137 return 0;
5138 }
5139
/* Module teardown: remove the debugfs entry, then unregister the L2CAP
 * socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5145
/* Allow enhanced retransmission mode to be disabled at module load time
 * (and toggled via /sys/module/.../parameters/disable_ertm).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");