Bluetooth: Drop L2CAP chan reference if ERTM ack_timer fired
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
54
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
59
60 bool disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
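/* Channel lookup helpers. The __-prefixed variants walk conn->chan_l
 * under rcu_read_lock() and return the channel unlocked; the wrappers
 * further below additionally lock the channel's socket before
 * returning it. */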
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 {
82 struct l2cap_chan *c, *r = NULL;
83
84 rcu_read_lock();
85
86 list_for_each_entry_rcu(c, &conn->chan_l, list) {
87 if (c->dcid == cid) {
88 r = c;
89 break;
90 }
91 }
92
93 rcu_read_unlock();
94 return r;
95 }
96
97 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
98 {
99 struct l2cap_chan *c, *r = NULL;
100
101 rcu_read_lock();
102
103 list_for_each_entry_rcu(c, &conn->chan_l, list) {
104 if (c->scid == cid) {
105 r = c;
106 break;
107 }
108 }
109
110 rcu_read_unlock();
111 return r;
112 }
113
114 /* Find channel with given SCID.
115  * Returns a channel with its socket locked. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 {
118 struct l2cap_chan *c;
119
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 lock_sock(c->sk);
123 return c;
124 }
125
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
127 {
128 struct l2cap_chan *c, *r = NULL;
129
130 rcu_read_lock();
131
132 list_for_each_entry_rcu(c, &conn->chan_l, list) {
133 if (c->ident == ident) {
134 r = c;
135 break;
136 }
137 }
138
139 rcu_read_unlock();
140 return r;
141 }
142
143 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
144 {
145 struct l2cap_chan *c;
146
147 c = __l2cap_get_chan_by_ident(conn, ident);
148 if (c)
149 lock_sock(c->sk);
150 return c;
151 }
152
153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
154 {
155 struct l2cap_chan *c;
156
157 list_for_each_entry(c, &chan_list, global_l) {
158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
159 return c;
160 }
161 return NULL;
162 }
163
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165 {
166 int err;
167
168 write_lock(&chan_list_lock);
169
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 err = -EADDRINUSE;
172 goto done;
173 }
174
175 if (psm) {
176 chan->psm = psm;
177 chan->sport = psm;
178 err = 0;
179 } else {
180 u16 p;
181
182 err = -EINVAL;
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
187 err = 0;
188 break;
189 }
190 }
191
192 done:
193 write_unlock(&chan_list_lock);
194 return err;
195 }
196
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
198 {
199 write_lock(&chan_list_lock);
200
201 chan->scid = scid;
202
203 write_unlock(&chan_list_lock);
204
205 return 0;
206 }
207
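/* Pick the first unused dynamic CID (L2CAP_CID_DYN_START up to
 * L2CAP_CID_DYN_END) on this connection; returns 0 if the whole
 * dynamic range is already in use. */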
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209 {
210 u16 cid = L2CAP_CID_DYN_START;
211
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
215 }
216
217 return 0;
218 }
219
220 static char *state_to_string(int state)
221 {
222 switch(state) {
223 case BT_CONNECTED:
224 return "BT_CONNECTED";
225 case BT_OPEN:
226 return "BT_OPEN";
227 case BT_BOUND:
228 return "BT_BOUND";
229 case BT_LISTEN:
230 return "BT_LISTEN";
231 case BT_CONNECT:
232 return "BT_CONNECT";
233 case BT_CONNECT2:
234 return "BT_CONNECT2";
235 case BT_CONFIG:
236 return "BT_CONFIG";
237 case BT_DISCONN:
238 return "BT_DISCONN";
239 case BT_CLOSED:
240 return "BT_CLOSED";
241 }
242
243 return "invalid state";
244 }
245
246 static void l2cap_state_change(struct l2cap_chan *chan, int state)
247 {
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
249 state_to_string(state));
250
251 chan->state = state;
252 chan->ops->state_change(chan->data, state);
253 }
254
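/* Delayed work run when the channel timer expires: close the channel
 * with a reason derived from its current state and drop the reference
 * held while the timer was pending. */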
255 static void l2cap_chan_timeout(struct work_struct *work)
256 {
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
258 chan_timer.work);
259 struct sock *sk = chan->sk;
260 int reason;
261
262 BT_DBG("chan %p state %d", chan, chan->state);
263
264 lock_sock(sk);
265
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED;
268 else if (chan->state == BT_CONNECT &&
269 chan->sec_level != BT_SECURITY_SDP)
270 reason = ECONNREFUSED;
271 else
272 reason = ETIMEDOUT;
273
274 l2cap_chan_close(chan, reason);
275
276 release_sock(sk);
277
278 chan->ops->close(chan->data);
279 l2cap_chan_put(chan);
280 }
281
282 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
283 {
284 struct l2cap_chan *chan;
285
286 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 if (!chan)
288 return NULL;
289
290 chan->sk = sk;
291
292 write_lock(&chan_list_lock);
293 list_add(&chan->global_l, &chan_list);
294 write_unlock(&chan_list_lock);
295
296 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
297
298 chan->state = BT_OPEN;
299
300 atomic_set(&chan->refcnt, 1);
301
302 BT_DBG("sk %p chan %p", sk, chan);
303
304 return chan;
305 }
306
307 void l2cap_chan_destroy(struct l2cap_chan *chan)
308 {
309 write_lock(&chan_list_lock);
310 list_del(&chan->global_l);
311 write_unlock(&chan_list_lock);
312
313 l2cap_chan_put(chan);
314 }
315
316 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
317 {
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid);
320
321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
322
323 chan->conn = conn;
324
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
326 if (conn->hcon->type == LE_LINK) {
327 /* LE connection */
328 chan->omtu = L2CAP_LE_DEFAULT_MTU;
329 chan->scid = L2CAP_CID_LE_DATA;
330 chan->dcid = L2CAP_CID_LE_DATA;
331 } else {
332 /* Alloc CID for connection-oriented socket */
333 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU;
335 }
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
337 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU;
341 } else {
342 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING;
345 chan->omtu = L2CAP_DEFAULT_MTU;
346 }
347
348 chan->local_id = L2CAP_BESTEFFORT_ID;
349 chan->local_stype = L2CAP_SERV_BESTEFFORT;
350 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
351 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
352 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
353 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
354
355 l2cap_chan_hold(chan);
356
357 list_add_rcu(&chan->list, &conn->chan_l);
358 }
359
360 /* Delete channel.
361  * Must be called with the channel's socket locked. */
362 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
363 {
364 struct sock *sk = chan->sk;
365 struct l2cap_conn *conn = chan->conn;
366 struct sock *parent = bt_sk(sk)->parent;
367
368 __clear_chan_timer(chan);
369
370 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
371
372 if (conn) {
373 /* Delete from channel list */
374 list_del_rcu(&chan->list);
375 synchronize_rcu();
376
377 l2cap_chan_put(chan);
378
379 chan->conn = NULL;
380 hci_conn_put(conn->hcon);
381 }
382
383 l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED);
385
386 if (err)
387 sk->sk_err = err;
388
389 if (parent) {
390 bt_accept_unlink(sk);
391 parent->sk_data_ready(parent, 0);
392 } else
393 sk->sk_state_change(sk);
394
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 return;
398
399 skb_queue_purge(&chan->tx_q);
400
401 if (chan->mode == L2CAP_MODE_ERTM) {
402 struct srej_list *l, *tmp;
403
404 __clear_retrans_timer(chan);
405 __clear_monitor_timer(chan);
406 __clear_ack_timer(chan);
407
408 skb_queue_purge(&chan->srej_q);
409
410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
411 list_del(&l->list);
412 kfree(l);
413 }
414 }
415 }
416
417 static void l2cap_chan_cleanup_listen(struct sock *parent)
418 {
419 struct sock *sk;
420
421 BT_DBG("parent %p", parent);
422
423 /* Close not yet accepted channels */
424 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 __clear_chan_timer(chan);
427 lock_sock(sk);
428 l2cap_chan_close(chan, ECONNRESET);
429 release_sock(sk);
430 chan->ops->close(chan->data);
431 }
432 }
433
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
435 {
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
438
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
440
441 switch (chan->state) {
442 case BT_LISTEN:
443 l2cap_chan_cleanup_listen(sk);
444
445 l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED);
447 break;
448
449 case BT_CONNECTED:
450 case BT_CONFIG:
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 __clear_chan_timer(chan);
454 __set_chan_timer(chan, sk->sk_sndtimeo);
455 l2cap_send_disconn_req(conn, chan, reason);
456 } else
457 l2cap_chan_del(chan, reason);
458 break;
459
460 case BT_CONNECT2:
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 struct l2cap_conn_rsp rsp;
464 __u16 result;
465
466 if (bt_sk(sk)->defer_setup)
467 result = L2CAP_CR_SEC_BLOCK;
468 else
469 result = L2CAP_CR_BAD_PSM;
470 l2cap_state_change(chan, BT_DISCONN);
471
472 rsp.scid = cpu_to_le16(chan->dcid);
473 rsp.dcid = cpu_to_le16(chan->scid);
474 rsp.result = cpu_to_le16(result);
475 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
476 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
477 sizeof(rsp), &rsp);
478 }
479
480 l2cap_chan_del(chan, reason);
481 break;
482
483 case BT_CONNECT:
484 case BT_DISCONN:
485 l2cap_chan_del(chan, reason);
486 break;
487
488 default:
489 sock_set_flag(sk, SOCK_ZAPPED);
490 break;
491 }
492 }
493
494 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
495 {
496 if (chan->chan_type == L2CAP_CHAN_RAW) {
497 switch (chan->sec_level) {
498 case BT_SECURITY_HIGH:
499 return HCI_AT_DEDICATED_BONDING_MITM;
500 case BT_SECURITY_MEDIUM:
501 return HCI_AT_DEDICATED_BONDING;
502 default:
503 return HCI_AT_NO_BONDING;
504 }
505 } else if (chan->psm == cpu_to_le16(0x0001)) {
506 if (chan->sec_level == BT_SECURITY_LOW)
507 chan->sec_level = BT_SECURITY_SDP;
508
509 if (chan->sec_level == BT_SECURITY_HIGH)
510 return HCI_AT_NO_BONDING_MITM;
511 else
512 return HCI_AT_NO_BONDING;
513 } else {
514 switch (chan->sec_level) {
515 case BT_SECURITY_HIGH:
516 return HCI_AT_GENERAL_BONDING_MITM;
517 case BT_SECURITY_MEDIUM:
518 return HCI_AT_GENERAL_BONDING;
519 default:
520 return HCI_AT_NO_BONDING;
521 }
522 }
523 }
524
525 /* Service level security */
526 int l2cap_chan_check_security(struct l2cap_chan *chan)
527 {
528 struct l2cap_conn *conn = chan->conn;
529 __u8 auth_type;
530
531 auth_type = l2cap_get_auth_type(chan);
532
533 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
534 }
535
536 static u8 l2cap_get_ident(struct l2cap_conn *conn)
537 {
538 u8 id;
539
540 /* Get next available identifier.
541 * 1 - 128 are used by kernel.
542 * 129 - 199 are reserved.
543 * 200 - 254 are used by utilities like l2ping, etc.
544 */
545
546 spin_lock(&conn->lock);
547
548 if (++conn->tx_ident > 128)
549 conn->tx_ident = 1;
550
551 id = conn->tx_ident;
552
553 spin_unlock(&conn->lock);
554
555 return id;
556 }
557
558 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
559 {
560 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
561 u8 flags;
562
563 BT_DBG("code 0x%2.2x", code);
564
565 if (!skb)
566 return;
567
568 if (lmp_no_flush_capable(conn->hcon->hdev))
569 flags = ACL_START_NO_FLUSH;
570 else
571 flags = ACL_START;
572
573 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
574 skb->priority = HCI_PRIO_MAX;
575
576 hci_send_acl(conn->hchan, skb, flags);
577 }
578
579 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
580 {
581 struct hci_conn *hcon = chan->conn->hcon;
582 u16 flags;
583
584 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
585 skb->priority);
586
587 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
588 lmp_no_flush_capable(hcon->hdev))
589 flags = ACL_START_NO_FLUSH;
590 else
591 flags = ACL_START;
592
593 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
594 hci_send_acl(chan->conn->hchan, skb, flags);
595 }
596
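/* Build and transmit an S-frame carrying the given control field.
 * Adds the enhanced or extended header, folds in any pending F/P bit
 * and appends a CRC16 FCS when that has been negotiated. */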
597 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
598 {
599 struct sk_buff *skb;
600 struct l2cap_hdr *lh;
601 struct l2cap_conn *conn = chan->conn;
602 int count, hlen;
603
604 if (chan->state != BT_CONNECTED)
605 return;
606
607 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
608 hlen = L2CAP_EXT_HDR_SIZE;
609 else
610 hlen = L2CAP_ENH_HDR_SIZE;
611
612 if (chan->fcs == L2CAP_FCS_CRC16)
613 hlen += L2CAP_FCS_SIZE;
614
615 BT_DBG("chan %p, control 0x%8.8x", chan, control);
616
617 count = min_t(unsigned int, conn->mtu, hlen);
618
619 control |= __set_sframe(chan);
620
621 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
622 control |= __set_ctrl_final(chan);
623
624 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
625 control |= __set_ctrl_poll(chan);
626
627 skb = bt_skb_alloc(count, GFP_ATOMIC);
628 if (!skb)
629 return;
630
631 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
632 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
633 lh->cid = cpu_to_le16(chan->dcid);
634
635 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
636
637 if (chan->fcs == L2CAP_FCS_CRC16) {
638 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
639 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
640 }
641
642 skb->priority = HCI_PRIO_MAX;
643 l2cap_do_send(chan, skb);
644 }
645
646 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
647 {
648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
649 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
650 set_bit(CONN_RNR_SENT, &chan->conn_state);
651 } else
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
653
654 control |= __set_reqseq(chan, chan->buffer_seq);
655
656 l2cap_send_sframe(chan, control);
657 }
658
659 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
660 {
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
662 }
663
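/* Kick off channel establishment: once the information request
 * exchange has completed and security is satisfied, send a Connection
 * Request; otherwise query the remote feature mask first. */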
664 static void l2cap_do_start(struct l2cap_chan *chan)
665 {
666 struct l2cap_conn *conn = chan->conn;
667
668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
670 return;
671
672 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) {
674 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
676 req.psm = chan->psm;
677
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
680
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
682 sizeof(req), &req);
683 }
684 } else {
685 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
687
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn);
690
691 schedule_delayed_work(&conn->info_timer,
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
693
694 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req);
696 }
697 }
698
699 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
700 {
701 u32 local_feat_mask = l2cap_feat_mask;
702 if (!disable_ertm)
703 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
704
705 switch (mode) {
706 case L2CAP_MODE_ERTM:
707 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
708 case L2CAP_MODE_STREAMING:
709 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
710 default:
711 return 0x00;
712 }
713 }
714
715 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
716 {
717 struct sock *sk;
718 struct l2cap_disconn_req req;
719
720 if (!conn)
721 return;
722
723 sk = chan->sk;
724
725 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan);
728 __clear_ack_timer(chan);
729 }
730
731 req.dcid = cpu_to_le16(chan->dcid);
732 req.scid = cpu_to_le16(chan->scid);
733 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req);
735
736 l2cap_state_change(chan, BT_DISCONN);
737 sk->sk_err = err;
738 }
739
740 /* ---- L2CAP connections ---- */
741 static void l2cap_conn_start(struct l2cap_conn *conn)
742 {
743 struct l2cap_chan *chan;
744
745 BT_DBG("conn %p", conn);
746
747 rcu_read_lock();
748
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
750 struct sock *sk = chan->sk;
751
752 bh_lock_sock(sk);
753
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
755 bh_unlock_sock(sk);
756 continue;
757 }
758
759 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
761
762 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) {
764 bh_unlock_sock(sk);
765 continue;
766 }
767
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET);
774 bh_unlock_sock(sk);
775 continue;
776 }
777
778 req.scid = cpu_to_le16(chan->scid);
779 req.psm = chan->psm;
780
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
783
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 sizeof(req), &req);
786
787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp;
789 char buf[128];
790 rsp.scid = cpu_to_le16(chan->dcid);
791 rsp.dcid = cpu_to_le16(chan->scid);
792
793 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
800
801 } else {
802 l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 }
806 } else {
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
809 }
810
811 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
812 sizeof(rsp), &rsp);
813
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) {
816 bh_unlock_sock(sk);
817 continue;
818 }
819
820 set_bit(CONF_REQ_SENT, &chan->conf_state);
821 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
822 l2cap_build_conf_req(chan, buf), buf);
823 chan->num_conf_req++;
824 }
825
826 bh_unlock_sock(sk);
827 }
828
829 rcu_read_unlock();
830 }
831
832 /* Find channel with given SCID and source bdaddr.
833  * Returns closest match.
834 */
835 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
836 {
837 struct l2cap_chan *c, *c1 = NULL;
838
839 read_lock(&chan_list_lock);
840
841 list_for_each_entry(c, &chan_list, global_l) {
842 struct sock *sk = c->sk;
843
844 if (state && c->state != state)
845 continue;
846
847 if (c->scid == cid) {
848 /* Exact match. */
849 if (!bacmp(&bt_sk(sk)->src, src)) {
850 read_unlock(&chan_list_lock);
851 return c;
852 }
853
854 /* Closest match */
855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
856 c1 = c;
857 }
858 }
859
860 read_unlock(&chan_list_lock);
861
862 return c1;
863 }
864
865 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
866 {
867 struct sock *parent, *sk;
868 struct l2cap_chan *chan, *pchan;
869
870 BT_DBG("");
871
872 /* Check if we have a socket listening on this CID */
873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
874 conn->src);
875 if (!pchan)
876 return;
877
878 parent = pchan->sk;
879
880 lock_sock(parent);
881
882 /* Check for backlog size */
883 if (sk_acceptq_is_full(parent)) {
884 BT_DBG("backlog full %d", parent->sk_ack_backlog);
885 goto clean;
886 }
887
888 chan = pchan->ops->new_connection(pchan->data);
889 if (!chan)
890 goto clean;
891
892 sk = chan->sk;
893
894 hci_conn_hold(conn->hcon);
895
896 bacpy(&bt_sk(sk)->src, conn->src);
897 bacpy(&bt_sk(sk)->dst, conn->dst);
898
899 bt_accept_enqueue(parent, sk);
900
901 l2cap_chan_add(conn, chan);
902
903 __set_chan_timer(chan, sk->sk_sndtimeo);
904
905 l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0);
907
908 clean:
909 release_sock(parent);
910 }
911
912 static void l2cap_chan_ready(struct sock *sk)
913 {
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
915 struct sock *parent = bt_sk(sk)->parent;
916
917 BT_DBG("sk %p, parent %p", sk, parent);
918
919 chan->conf_state = 0;
920 __clear_chan_timer(chan);
921
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
924
925 if (parent)
926 parent->sk_data_ready(parent, 0);
927 }
928
929 static void l2cap_conn_ready(struct l2cap_conn *conn)
930 {
931 struct l2cap_chan *chan;
932
933 BT_DBG("conn %p", conn);
934
935 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
936 l2cap_le_conn_ready(conn);
937
938 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level);
940
941 rcu_read_lock();
942
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
945
946 bh_lock_sock(sk);
947
948 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk);
951
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
953 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk);
956
957 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan);
959
960 bh_unlock_sock(sk);
961 }
962
963 rcu_read_unlock();
964 }
965
966 /* Notify sockets that we cannot guarantee reliability anymore */
967 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
968 {
969 struct l2cap_chan *chan;
970
971 BT_DBG("conn %p", conn);
972
973 rcu_read_lock();
974
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
977
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
979 sk->sk_err = err;
980 }
981
982 rcu_read_unlock();
983 }
984
985 static void l2cap_info_timeout(struct work_struct *work)
986 {
987 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
988 info_timer.work);
989
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
992
993 l2cap_conn_start(conn);
994 }
995
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
997 {
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1000 struct sock *sk;
1001
1002 if (!conn)
1003 return;
1004
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1006
1007 kfree_skb(conn->rx_skb);
1008
1009 /* Kill channels */
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 sk = chan->sk;
1012 lock_sock(sk);
1013 l2cap_chan_del(chan, err);
1014 release_sock(sk);
1015 chan->ops->close(chan->data);
1016 }
1017
1018 hci_chan_del(conn->hchan);
1019
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 __cancel_delayed_work(&conn->info_timer);
1022
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 __cancel_delayed_work(&conn->security_timer);
1025 smp_chan_destroy(conn);
1026 }
1027
1028 hcon->l2cap_data = NULL;
1029 kfree(conn);
1030 }
1031
1032 static void security_timeout(struct work_struct *work)
1033 {
1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1035 security_timer.work);
1036
1037 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1038 }
1039
1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1041 {
1042 struct l2cap_conn *conn = hcon->l2cap_data;
1043 struct hci_chan *hchan;
1044
1045 if (conn || status)
1046 return conn;
1047
1048 hchan = hci_chan_create(hcon);
1049 if (!hchan)
1050 return NULL;
1051
1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1053 if (!conn) {
1054 hci_chan_del(hchan);
1055 return NULL;
1056 }
1057
1058 hcon->l2cap_data = conn;
1059 conn->hcon = hcon;
1060 conn->hchan = hchan;
1061
1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1063
1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1065 conn->mtu = hcon->hdev->le_mtu;
1066 else
1067 conn->mtu = hcon->hdev->acl_mtu;
1068
1069 conn->src = &hcon->hdev->bdaddr;
1070 conn->dst = &hcon->dst;
1071
1072 conn->feat_mask = 0;
1073
1074 spin_lock_init(&conn->lock);
1075
1076 INIT_LIST_HEAD(&conn->chan_l);
1077
1078 if (hcon->type == LE_LINK)
1079 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1080 else
1081 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1082
1083 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1084
1085 return conn;
1086 }
1087
1088 /* ---- Socket interface ---- */
1089
1090 /* Find channel with given PSM and source bdaddr.
1091 * Returns closest match.
1092 */
1093 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1094 {
1095 struct l2cap_chan *c, *c1 = NULL;
1096
1097 read_lock(&chan_list_lock);
1098
1099 list_for_each_entry(c, &chan_list, global_l) {
1100 struct sock *sk = c->sk;
1101
1102 if (state && c->state != state)
1103 continue;
1104
1105 if (c->psm == psm) {
1106 /* Exact match. */
1107 if (!bacmp(&bt_sk(sk)->src, src)) {
1108 read_unlock(&chan_list_lock);
1109 return c;
1110 }
1111
1112 /* Closest match */
1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1114 c1 = c;
1115 }
1116 }
1117
1118 read_unlock(&chan_list_lock);
1119
1120 return c1;
1121 }
1122
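/* Connect a channel to the given PSM/CID on a remote device: resolve
 * the route, validate PSM and channel mode, create (or reuse) the ACL
 * or LE link, attach the channel to the connection and start the
 * connect/configure sequence. */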
1123 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1124 {
1125 struct sock *sk = chan->sk;
1126 bdaddr_t *src = &bt_sk(sk)->src;
1127 struct l2cap_conn *conn;
1128 struct hci_conn *hcon;
1129 struct hci_dev *hdev;
1130 __u8 auth_type;
1131 int err;
1132
1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1134 chan->psm);
1135
1136 hdev = hci_get_route(dst, src);
1137 if (!hdev)
1138 return -EHOSTUNREACH;
1139
1140 hci_dev_lock(hdev);
1141
1142 lock_sock(sk);
1143
1144 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1146 chan->chan_type != L2CAP_CHAN_RAW) {
1147 err = -EINVAL;
1148 goto done;
1149 }
1150
1151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1152 err = -EINVAL;
1153 goto done;
1154 }
1155
1156 switch (chan->mode) {
1157 case L2CAP_MODE_BASIC:
1158 break;
1159 case L2CAP_MODE_ERTM:
1160 case L2CAP_MODE_STREAMING:
1161 if (!disable_ertm)
1162 break;
1163 /* fall through */
1164 default:
1165 err = -ENOTSUPP;
1166 goto done;
1167 }
1168
1169 switch (sk->sk_state) {
1170 case BT_CONNECT:
1171 case BT_CONNECT2:
1172 case BT_CONFIG:
1173 /* Already connecting */
1174 err = 0;
1175 goto done;
1176
1177 case BT_CONNECTED:
1178 /* Already connected */
1179 err = -EISCONN;
1180 goto done;
1181
1182 case BT_OPEN:
1183 case BT_BOUND:
1184 /* Can connect */
1185 break;
1186
1187 default:
1188 err = -EBADFD;
1189 goto done;
1190 }
1191
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst);
1194 chan->psm = psm;
1195 chan->dcid = cid;
1196
1197 auth_type = l2cap_get_auth_type(chan);
1198
1199 if (chan->dcid == L2CAP_CID_LE_DATA)
1200 hcon = hci_connect(hdev, LE_LINK, dst,
1201 chan->sec_level, auth_type);
1202 else
1203 hcon = hci_connect(hdev, ACL_LINK, dst,
1204 chan->sec_level, auth_type);
1205
1206 if (IS_ERR(hcon)) {
1207 err = PTR_ERR(hcon);
1208 goto done;
1209 }
1210
1211 conn = l2cap_conn_add(hcon, 0);
1212 if (!conn) {
1213 hci_conn_put(hcon);
1214 err = -ENOMEM;
1215 goto done;
1216 }
1217
1218 /* Update source addr of the socket */
1219 bacpy(src, conn->src);
1220
1221 l2cap_chan_add(conn, chan);
1222
1223 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo);
1225
1226 if (hcon->state == BT_CONNECTED) {
1227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1228 __clear_chan_timer(chan);
1229 if (l2cap_chan_check_security(chan))
1230 l2cap_state_change(chan, BT_CONNECTED);
1231 } else
1232 l2cap_do_start(chan);
1233 }
1234
1235 err = 0;
1236
1237 done:
1238 hci_dev_unlock(hdev);
1239 hci_dev_put(hdev);
1240 return err;
1241 }
1242
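/* Sleep until every outstanding ERTM I-frame has been acknowledged,
 * the connection goes away, a signal arrives or the socket reports an
 * error. */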
1243 int __l2cap_wait_ack(struct sock *sk)
1244 {
1245 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1246 DECLARE_WAITQUEUE(wait, current);
1247 int err = 0;
1248 int timeo = HZ/5;
1249
1250 add_wait_queue(sk_sleep(sk), &wait);
1251 set_current_state(TASK_INTERRUPTIBLE);
1252 while (chan->unacked_frames > 0 && chan->conn) {
1253 if (!timeo)
1254 timeo = HZ/5;
1255
1256 if (signal_pending(current)) {
1257 err = sock_intr_errno(timeo);
1258 break;
1259 }
1260
1261 release_sock(sk);
1262 timeo = schedule_timeout(timeo);
1263 lock_sock(sk);
1264 set_current_state(TASK_INTERRUPTIBLE);
1265
1266 err = sock_error(sk);
1267 if (err)
1268 break;
1269 }
1270 set_current_state(TASK_RUNNING);
1271 remove_wait_queue(sk_sleep(sk), &wait);
1272 return err;
1273 }
1274
1275 static void l2cap_monitor_timeout(struct work_struct *work)
1276 {
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1280
1281 BT_DBG("chan %p", chan);
1282
1283 lock_sock(sk);
1284 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1286 release_sock(sk);
1287 return;
1288 }
1289
1290 chan->retry_count++;
1291 __set_monitor_timer(chan);
1292
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1294 release_sock(sk);
1295 }
1296
1297 static void l2cap_retrans_timeout(struct work_struct *work)
1298 {
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1302
1303 BT_DBG("chan %p", chan);
1304
1305 lock_sock(sk);
1306 chan->retry_count = 1;
1307 __set_monitor_timer(chan);
1308
1309 set_bit(CONN_WAIT_F, &chan->conn_state);
1310
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1312 release_sock(sk);
1313 }
1314
1315 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1316 {
1317 struct sk_buff *skb;
1318
1319 while ((skb = skb_peek(&chan->tx_q)) &&
1320 chan->unacked_frames) {
1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1322 break;
1323
1324 skb = skb_dequeue(&chan->tx_q);
1325 kfree_skb(skb);
1326
1327 chan->unacked_frames--;
1328 }
1329
1330 if (!chan->unacked_frames)
1331 __clear_retrans_timer(chan);
1332 }
1333
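/* Streaming mode transmit path: dequeue every pending PDU, stamp the
 * next TX sequence number (and the FCS if enabled) and send it right
 * away; streaming mode keeps no copy for retransmission. */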
1334 static void l2cap_streaming_send(struct l2cap_chan *chan)
1335 {
1336 struct sk_buff *skb;
1337 u32 control;
1338 u16 fcs;
1339
1340 while ((skb = skb_dequeue(&chan->tx_q))) {
1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1342 control |= __set_txseq(chan, chan->next_tx_seq);
1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1344
1345 if (chan->fcs == L2CAP_FCS_CRC16) {
1346 fcs = crc16(0, (u8 *)skb->data,
1347 skb->len - L2CAP_FCS_SIZE);
1348 put_unaligned_le16(fcs,
1349 skb->data + skb->len - L2CAP_FCS_SIZE);
1350 }
1351
1352 l2cap_do_send(chan, skb);
1353
1354 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1355 }
1356 }
1357
1358 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1359 {
1360 struct sk_buff *skb, *tx_skb;
1361 u16 fcs;
1362 u32 control;
1363
1364 skb = skb_peek(&chan->tx_q);
1365 if (!skb)
1366 return;
1367
1368 while (bt_cb(skb)->tx_seq != tx_seq) {
1369 if (skb_queue_is_last(&chan->tx_q, skb))
1370 return;
1371
1372 skb = skb_queue_next(&chan->tx_q, skb);
1373 }
1374
1375 if (chan->remote_max_tx &&
1376 bt_cb(skb)->retries == chan->remote_max_tx) {
1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1378 return;
1379 }
1380
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++;
1383
1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1385 control &= __get_sar_mask(chan);
1386
1387 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1388 control |= __set_ctrl_final(chan);
1389
1390 control |= __set_reqseq(chan, chan->buffer_seq);
1391 control |= __set_txseq(chan, tx_seq);
1392
1393 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1394
1395 if (chan->fcs == L2CAP_FCS_CRC16) {
1396 fcs = crc16(0, (u8 *)tx_skb->data,
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 put_unaligned_le16(fcs,
1399 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1400 }
1401
1402 l2cap_do_send(chan, tx_skb);
1403 }
1404
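/* ERTM transmit path: send clones of queued I-frames starting at
 * tx_send_head while the transmit window allows, arming the
 * retransmission timer and counting unacked frames. Returns the
 * number of frames sent or a negative error. */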
1405 static int l2cap_ertm_send(struct l2cap_chan *chan)
1406 {
1407 struct sk_buff *skb, *tx_skb;
1408 u16 fcs;
1409 u32 control;
1410 int nsent = 0;
1411
1412 if (chan->state != BT_CONNECTED)
1413 return -ENOTCONN;
1414
1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1416
1417 if (chan->remote_max_tx &&
1418 bt_cb(skb)->retries == chan->remote_max_tx) {
1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1420 break;
1421 }
1422
1423 tx_skb = skb_clone(skb, GFP_ATOMIC);
1424
1425 bt_cb(skb)->retries++;
1426
1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1428 control &= __get_sar_mask(chan);
1429
1430 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1431 control |= __set_ctrl_final(chan);
1432
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1434 control |= __set_txseq(chan, chan->next_tx_seq);
1435
1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1437
1438 if (chan->fcs == L2CAP_FCS_CRC16) {
1439 fcs = crc16(0, (u8 *)skb->data,
1440 tx_skb->len - L2CAP_FCS_SIZE);
1441 put_unaligned_le16(fcs, skb->data +
1442 tx_skb->len - L2CAP_FCS_SIZE);
1443 }
1444
1445 l2cap_do_send(chan, tx_skb);
1446
1447 __set_retrans_timer(chan);
1448
1449 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1450
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1452
1453 if (bt_cb(skb)->retries == 1)
1454 chan->unacked_frames++;
1455
1456 chan->frames_sent++;
1457
1458 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL;
1460 else
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1462
1463 nsent++;
1464 }
1465
1466 return nsent;
1467 }
1468
1469 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1470 {
1471 int ret;
1472
1473 if (!skb_queue_empty(&chan->tx_q))
1474 chan->tx_send_head = chan->tx_q.next;
1475
1476 chan->next_tx_seq = chan->expected_ack_seq;
1477 ret = l2cap_ertm_send(chan);
1478 return ret;
1479 }
1480
1481 static void __l2cap_send_ack(struct l2cap_chan *chan)
1482 {
1483 u32 control = 0;
1484
1485 control |= __set_reqseq(chan, chan->buffer_seq);
1486
1487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1489 set_bit(CONN_RNR_SENT, &chan->conn_state);
1490 l2cap_send_sframe(chan, control);
1491 return;
1492 }
1493
1494 if (l2cap_ertm_send(chan) > 0)
1495 return;
1496
1497 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1498 l2cap_send_sframe(chan, control);
1499 }
1500
1501 static void l2cap_send_ack(struct l2cap_chan *chan)
1502 {
1503 __clear_ack_timer(chan);
1504 __l2cap_send_ack(chan);
1505 }
1506
1507 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1508 {
1509 struct srej_list *tail;
1510 u32 control;
1511
1512 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1513 control |= __set_ctrl_final(chan);
1514
1515 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1516 control |= __set_reqseq(chan, tail->tx_seq);
1517
1518 l2cap_send_sframe(chan, control);
1519 }
1520
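/* Copy the user iovec into skb, spilling anything beyond the first
 * fragment into MTU-sized continuation skbs chained on frag_list.
 * Returns the number of bytes copied or a negative error. */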
1521 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1522 {
1523 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1524 struct sk_buff **frag;
1525 int err, sent = 0;
1526
1527 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1528 return -EFAULT;
1529
1530 sent += count;
1531 len -= count;
1532
1533 /* Continuation fragments (no L2CAP header) */
1534 frag = &skb_shinfo(skb)->frag_list;
1535 while (len) {
1536 count = min_t(unsigned int, conn->mtu, len);
1537
1538 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1539 if (!*frag)
1540 return err;
1541 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1542 return -EFAULT;
1543
1544 (*frag)->priority = skb->priority;
1545
1546 sent += count;
1547 len -= count;
1548
1549 frag = &(*frag)->next;
1550 }
1551
1552 return sent;
1553 }
1554
1555 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1556 struct msghdr *msg, size_t len,
1557 u32 priority)
1558 {
1559 struct sock *sk = chan->sk;
1560 struct l2cap_conn *conn = chan->conn;
1561 struct sk_buff *skb;
1562 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1563 struct l2cap_hdr *lh;
1564
1565 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1566
1567 count = min_t(unsigned int, (conn->mtu - hlen), len);
1568 skb = bt_skb_send_alloc(sk, count + hlen,
1569 msg->msg_flags & MSG_DONTWAIT, &err);
1570 if (!skb)
1571 return ERR_PTR(err);
1572
1573 skb->priority = priority;
1574
1575 /* Create L2CAP header */
1576 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1577 lh->cid = cpu_to_le16(chan->dcid);
1578 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1579 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1580
1581 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1582 if (unlikely(err < 0)) {
1583 kfree_skb(skb);
1584 return ERR_PTR(err);
1585 }
1586 return skb;
1587 }
1588
1589 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1590 struct msghdr *msg, size_t len,
1591 u32 priority)
1592 {
1593 struct sock *sk = chan->sk;
1594 struct l2cap_conn *conn = chan->conn;
1595 struct sk_buff *skb;
1596 int err, count, hlen = L2CAP_HDR_SIZE;
1597 struct l2cap_hdr *lh;
1598
1599 BT_DBG("sk %p len %d", sk, (int)len);
1600
1601 count = min_t(unsigned int, (conn->mtu - hlen), len);
1602 skb = bt_skb_send_alloc(sk, count + hlen,
1603 msg->msg_flags & MSG_DONTWAIT, &err);
1604 if (!skb)
1605 return ERR_PTR(err);
1606
1607 skb->priority = priority;
1608
1609 /* Create L2CAP header */
1610 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1611 lh->cid = cpu_to_le16(chan->dcid);
1612 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1613
1614 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1615 if (unlikely(err < 0)) {
1616 kfree_skb(skb);
1617 return ERR_PTR(err);
1618 }
1619 return skb;
1620 }
1621
1622 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1623 struct msghdr *msg, size_t len,
1624 u32 control, u16 sdulen)
1625 {
1626 struct sock *sk = chan->sk;
1627 struct l2cap_conn *conn = chan->conn;
1628 struct sk_buff *skb;
1629 int err, count, hlen;
1630 struct l2cap_hdr *lh;
1631
1632 BT_DBG("sk %p len %d", sk, (int)len);
1633
1634 if (!conn)
1635 return ERR_PTR(-ENOTCONN);
1636
1637 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1638 hlen = L2CAP_EXT_HDR_SIZE;
1639 else
1640 hlen = L2CAP_ENH_HDR_SIZE;
1641
1642 if (sdulen)
1643 hlen += L2CAP_SDULEN_SIZE;
1644
1645 if (chan->fcs == L2CAP_FCS_CRC16)
1646 hlen += L2CAP_FCS_SIZE;
1647
1648 count = min_t(unsigned int, (conn->mtu - hlen), len);
1649 skb = bt_skb_send_alloc(sk, count + hlen,
1650 msg->msg_flags & MSG_DONTWAIT, &err);
1651 if (!skb)
1652 return ERR_PTR(err);
1653
1654 /* Create L2CAP header */
1655 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1656 lh->cid = cpu_to_le16(chan->dcid);
1657 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1658
1659 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1660
1661 if (sdulen)
1662 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1663
1664 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1665 if (unlikely(err < 0)) {
1666 kfree_skb(skb);
1667 return ERR_PTR(err);
1668 }
1669
1670 if (chan->fcs == L2CAP_FCS_CRC16)
1671 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1672
1673 bt_cb(skb)->retries = 0;
1674 return skb;
1675 }
1676
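/* Segment an SDU larger than the remote MPS into Start/Continue/End
 * I-frame PDUs, append them to the transmit queue and return the
 * number of bytes queued. */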
1677 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1678 {
1679 struct sk_buff *skb;
1680 struct sk_buff_head sar_queue;
1681 u32 control;
1682 size_t size = 0;
1683
1684 skb_queue_head_init(&sar_queue);
1685 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1686 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1687 if (IS_ERR(skb))
1688 return PTR_ERR(skb);
1689
1690 __skb_queue_tail(&sar_queue, skb);
1691 len -= chan->remote_mps;
1692 size += chan->remote_mps;
1693
1694 while (len > 0) {
1695 size_t buflen;
1696
1697 if (len > chan->remote_mps) {
1698 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1699 buflen = chan->remote_mps;
1700 } else {
1701 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1702 buflen = len;
1703 }
1704
1705 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1706 if (IS_ERR(skb)) {
1707 skb_queue_purge(&sar_queue);
1708 return PTR_ERR(skb);
1709 }
1710
1711 __skb_queue_tail(&sar_queue, skb);
1712 len -= buflen;
1713 size += buflen;
1714 }
1715 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1716 if (chan->tx_send_head == NULL)
1717 chan->tx_send_head = sar_queue.next;
1718
1719 return size;
1720 }
1721
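/* Transmit an SDU on a channel: connectionless channels get a single
 * PDU, basic mode enforces the outgoing MTU, and ERTM/streaming modes
 * segment to the remote MPS before handing off to the per-mode
 * transmit machinery. */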
1722 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1723 u32 priority)
1724 {
1725 struct sk_buff *skb;
1726 u32 control;
1727 int err;
1728
1729 /* Connectionless channel */
1730 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1731 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1732 if (IS_ERR(skb))
1733 return PTR_ERR(skb);
1734
1735 l2cap_do_send(chan, skb);
1736 return len;
1737 }
1738
1739 switch (chan->mode) {
1740 case L2CAP_MODE_BASIC:
1741 /* Check outgoing MTU */
1742 if (len > chan->omtu)
1743 return -EMSGSIZE;
1744
1745 /* Create a basic PDU */
1746 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1747 if (IS_ERR(skb))
1748 return PTR_ERR(skb);
1749
1750 l2cap_do_send(chan, skb);
1751 err = len;
1752 break;
1753
1754 case L2CAP_MODE_ERTM:
1755 case L2CAP_MODE_STREAMING:
1756 /* Entire SDU fits into one PDU */
1757 if (len <= chan->remote_mps) {
1758 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1759 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1760 0);
1761 if (IS_ERR(skb))
1762 return PTR_ERR(skb);
1763
1764 __skb_queue_tail(&chan->tx_q, skb);
1765
1766 if (chan->tx_send_head == NULL)
1767 chan->tx_send_head = skb;
1768
1769 } else {
1770 /* Segment SDU into multiple PDUs */
1771 err = l2cap_sar_segment_sdu(chan, msg, len);
1772 if (err < 0)
1773 return err;
1774 }
1775
1776 if (chan->mode == L2CAP_MODE_STREAMING) {
1777 l2cap_streaming_send(chan);
1778 err = len;
1779 break;
1780 }
1781
1782 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1783 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1784 err = len;
1785 break;
1786 }
1787
1788 err = l2cap_ertm_send(chan);
1789 if (err >= 0)
1790 err = len;
1791
1792 break;
1793
1794 default:
1795 BT_DBG("bad state %1.1x", chan->mode);
1796 err = -EBADFD;
1797 }
1798
1799 return err;
1800 }
1801
1802 /* Copy frame to all raw sockets on that connection */
1803 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1804 {
1805 struct sk_buff *nskb;
1806 struct l2cap_chan *chan;
1807
1808 BT_DBG("conn %p", conn);
1809
1810 rcu_read_lock();
1811
1812 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1813 struct sock *sk = chan->sk;
1814 if (chan->chan_type != L2CAP_CHAN_RAW)
1815 continue;
1816
1817 /* Don't send frame to the socket it came from */
1818 if (skb->sk == sk)
1819 continue;
1820 nskb = skb_clone(skb, GFP_ATOMIC);
1821 if (!nskb)
1822 continue;
1823
1824 if (chan->ops->recv(chan->data, nskb))
1825 kfree_skb(nskb);
1826 }
1827
1828 rcu_read_unlock();
1829 }
1830
1831 /* ---- L2CAP signalling commands ---- */
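/* Build a signalling PDU (L2CAP header + command header + payload),
 * fragmenting the payload to the connection MTU with continuation
 * skbs chained on frag_list. */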
1832 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1833 u8 code, u8 ident, u16 dlen, void *data)
1834 {
1835 struct sk_buff *skb, **frag;
1836 struct l2cap_cmd_hdr *cmd;
1837 struct l2cap_hdr *lh;
1838 int len, count;
1839
1840 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1841 conn, code, ident, dlen);
1842
1843 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1844 count = min_t(unsigned int, conn->mtu, len);
1845
1846 skb = bt_skb_alloc(count, GFP_ATOMIC);
1847 if (!skb)
1848 return NULL;
1849
1850 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1851 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1852
1853 if (conn->hcon->type == LE_LINK)
1854 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1855 else
1856 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1857
1858 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1859 cmd->code = code;
1860 cmd->ident = ident;
1861 cmd->len = cpu_to_le16(dlen);
1862
1863 if (dlen) {
1864 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1865 memcpy(skb_put(skb, count), data, count);
1866 data += count;
1867 }
1868
1869 len -= skb->len;
1870
1871 /* Continuation fragments (no L2CAP header) */
1872 frag = &skb_shinfo(skb)->frag_list;
1873 while (len) {
1874 count = min_t(unsigned int, conn->mtu, len);
1875
1876 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1877 if (!*frag)
1878 goto fail;
1879
1880 memcpy(skb_put(*frag, count), data, count);
1881
1882 len -= count;
1883 data += count;
1884
1885 frag = &(*frag)->next;
1886 }
1887
1888 return skb;
1889
1890 fail:
1891 kfree_skb(skb);
1892 return NULL;
1893 }
1894
1895 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1896 {
1897 struct l2cap_conf_opt *opt = *ptr;
1898 int len;
1899
1900 len = L2CAP_CONF_OPT_SIZE + opt->len;
1901 *ptr += len;
1902
1903 *type = opt->type;
1904 *olen = opt->len;
1905
1906 switch (opt->len) {
1907 case 1:
1908 *val = *((u8 *) opt->val);
1909 break;
1910
1911 case 2:
1912 *val = get_unaligned_le16(opt->val);
1913 break;
1914
1915 case 4:
1916 *val = get_unaligned_le32(opt->val);
1917 break;
1918
1919 default:
1920 *val = (unsigned long) opt->val;
1921 break;
1922 }
1923
1924 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1925 return len;
1926 }
1927
1928 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1929 {
1930 struct l2cap_conf_opt *opt = *ptr;
1931
1932 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1933
1934 opt->type = type;
1935 opt->len = len;
1936
1937 switch (len) {
1938 case 1:
1939 *((u8 *) opt->val) = val;
1940 break;
1941
1942 case 2:
1943 put_unaligned_le16(val, opt->val);
1944 break;
1945
1946 case 4:
1947 put_unaligned_le32(val, opt->val);
1948 break;
1949
1950 default:
1951 memcpy(opt->val, (void *) val, len);
1952 break;
1953 }
1954
1955 *ptr += L2CAP_CONF_OPT_SIZE + len;
1956 }
1957
1958 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1959 {
1960 struct l2cap_conf_efs efs;
1961
1962 switch (chan->mode) {
1963 case L2CAP_MODE_ERTM:
1964 efs.id = chan->local_id;
1965 efs.stype = chan->local_stype;
1966 efs.msdu = cpu_to_le16(chan->local_msdu);
1967 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1968 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1969 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1970 break;
1971
1972 case L2CAP_MODE_STREAMING:
1973 efs.id = 1;
1974 efs.stype = L2CAP_SERV_BESTEFFORT;
1975 efs.msdu = cpu_to_le16(chan->local_msdu);
1976 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1977 efs.acc_lat = 0;
1978 efs.flush_to = 0;
1979 break;
1980
1981 default:
1982 return;
1983 }
1984
1985 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1986 (unsigned long) &efs);
1987 }
1988
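/* Ack timer work handler: send the pending acknowledgement (or flush
 * queued I-frames) and drop the channel reference held while the ack
 * timer was pending. */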
1989 static void l2cap_ack_timeout(struct work_struct *work)
1990 {
1991 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1992 ack_timer.work);
1993
1994 BT_DBG("chan %p", chan);
1995
1996 lock_sock(chan->sk);
1997 __l2cap_send_ack(chan);
1998 release_sock(chan->sk);
1999
2000 l2cap_chan_put(chan);
2001 }
2002
2003 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2004 {
2005 chan->expected_ack_seq = 0;
2006 chan->unacked_frames = 0;
2007 chan->buffer_seq = 0;
2008 chan->num_acked = 0;
2009 chan->frames_sent = 0;
2010
2011 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2012 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2013 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2014
2015 skb_queue_head_init(&chan->srej_q);
2016
2017 INIT_LIST_HEAD(&chan->srej_l);
2018 }
2019
2020 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2021 {
2022 switch (mode) {
2023 case L2CAP_MODE_STREAMING:
2024 case L2CAP_MODE_ERTM:
2025 if (l2cap_mode_supported(mode, remote_feat_mask))
2026 return mode;
2027 /* fall through */
2028 default:
2029 return L2CAP_MODE_BASIC;
2030 }
2031 }
2032
2033 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2034 {
2035 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2036 }
2037
2038 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2039 {
2040 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2041 }
2042
2043 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2044 {
2045 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2046 __l2cap_ews_supported(chan)) {
2047 /* use extended control field */
2048 set_bit(FLAG_EXT_CTRL, &chan->flags);
2049 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2050 } else {
2051 chan->tx_win = min_t(u16, chan->tx_win,
2052 L2CAP_DEFAULT_TX_WINDOW);
2053 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2054 }
2055 }
2056
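/* Build the local Configure Request at *data: an MTU option when the
 * requested MTU differs from the default, then an RFC option for the
 * selected mode plus optional FCS, EFS and extended-window options.
 * Returns the request length in bytes. */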
2057 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2058 {
2059 struct l2cap_conf_req *req = data;
2060 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2061 void *ptr = req->data;
2062 u16 size;
2063
2064 BT_DBG("chan %p", chan);
2065
2066 if (chan->num_conf_req || chan->num_conf_rsp)
2067 goto done;
2068
2069 switch (chan->mode) {
2070 case L2CAP_MODE_STREAMING:
2071 case L2CAP_MODE_ERTM:
2072 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2073 break;
2074
2075 if (__l2cap_efs_supported(chan))
2076 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2077
2078 /* fall through */
2079 default:
2080 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2081 break;
2082 }
2083
2084 done:
2085 if (chan->imtu != L2CAP_DEFAULT_MTU)
2086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2087
2088 switch (chan->mode) {
2089 case L2CAP_MODE_BASIC:
2090 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2091 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2092 break;
2093
2094 rfc.mode = L2CAP_MODE_BASIC;
2095 rfc.txwin_size = 0;
2096 rfc.max_transmit = 0;
2097 rfc.retrans_timeout = 0;
2098 rfc.monitor_timeout = 0;
2099 rfc.max_pdu_size = 0;
2100
2101 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2102 (unsigned long) &rfc);
2103 break;
2104
2105 case L2CAP_MODE_ERTM:
2106 rfc.mode = L2CAP_MODE_ERTM;
2107 rfc.max_transmit = chan->max_tx;
2108 rfc.retrans_timeout = 0;
2109 rfc.monitor_timeout = 0;
2110
2111 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2112 L2CAP_EXT_HDR_SIZE -
2113 L2CAP_SDULEN_SIZE -
2114 L2CAP_FCS_SIZE);
2115 rfc.max_pdu_size = cpu_to_le16(size);
2116
2117 l2cap_txwin_setup(chan);
2118
2119 rfc.txwin_size = min_t(u16, chan->tx_win,
2120 L2CAP_DEFAULT_TX_WINDOW);
2121
2122 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2123 (unsigned long) &rfc);
2124
2125 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2126 l2cap_add_opt_efs(&ptr, chan);
2127
2128 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2129 break;
2130
2131 if (chan->fcs == L2CAP_FCS_NONE ||
2132 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2133 chan->fcs = L2CAP_FCS_NONE;
2134 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2135 }
2136
2137 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2138 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2139 chan->tx_win);
2140 break;
2141
2142 case L2CAP_MODE_STREAMING:
2143 rfc.mode = L2CAP_MODE_STREAMING;
2144 rfc.txwin_size = 0;
2145 rfc.max_transmit = 0;
2146 rfc.retrans_timeout = 0;
2147 rfc.monitor_timeout = 0;
2148
2149 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2150 L2CAP_EXT_HDR_SIZE -
2151 L2CAP_SDULEN_SIZE -
2152 L2CAP_FCS_SIZE);
2153 rfc.max_pdu_size = cpu_to_le16(size);
2154
2155 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2156 (unsigned long) &rfc);
2157
2158 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2159 l2cap_add_opt_efs(&ptr, chan);
2160
2161 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2162 break;
2163
2164 if (chan->fcs == L2CAP_FCS_NONE ||
2165 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2166 chan->fcs = L2CAP_FCS_NONE;
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2168 }
2169 break;
2170 }
2171
2172 req->dcid = cpu_to_le16(chan->dcid);
2173 req->flags = cpu_to_le16(0);
2174
2175 return ptr - data;
2176 }
2177
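/* Parse the peer's Configure Request buffered in chan->conf_req and
 * build our Configure Response at *data, accepting or rejecting the
 * MTU, RFC, FCS, EFS and EWS options. */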
2178 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2179 {
2180 struct l2cap_conf_rsp *rsp = data;
2181 void *ptr = rsp->data;
2182 void *req = chan->conf_req;
2183 int len = chan->conf_len;
2184 int type, hint, olen;
2185 unsigned long val;
2186 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2187 struct l2cap_conf_efs efs;
2188 u8 remote_efs = 0;
2189 u16 mtu = L2CAP_DEFAULT_MTU;
2190 u16 result = L2CAP_CONF_SUCCESS;
2191 u16 size;
2192
2193 BT_DBG("chan %p", chan);
2194
2195 while (len >= L2CAP_CONF_OPT_SIZE) {
2196 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2197
2198 hint = type & L2CAP_CONF_HINT;
2199 type &= L2CAP_CONF_MASK;
2200
2201 switch (type) {
2202 case L2CAP_CONF_MTU:
2203 mtu = val;
2204 break;
2205
2206 case L2CAP_CONF_FLUSH_TO:
2207 chan->flush_to = val;
2208 break;
2209
2210 case L2CAP_CONF_QOS:
2211 break;
2212
2213 case L2CAP_CONF_RFC:
2214 if (olen == sizeof(rfc))
2215 memcpy(&rfc, (void *) val, olen);
2216 break;
2217
2218 case L2CAP_CONF_FCS:
2219 if (val == L2CAP_FCS_NONE)
2220 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2221 break;
2222
2223 case L2CAP_CONF_EFS:
2224 remote_efs = 1;
2225 if (olen == sizeof(efs))
2226 memcpy(&efs, (void *) val, olen);
2227 break;
2228
2229 case L2CAP_CONF_EWS:
2230 if (!enable_hs)
2231 return -ECONNREFUSED;
2232
2233 set_bit(FLAG_EXT_CTRL, &chan->flags);
2234 set_bit(CONF_EWS_RECV, &chan->conf_state);
2235 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2236 chan->remote_tx_win = val;
2237 break;
2238
2239 default:
2240 if (hint)
2241 break;
2242
2243 result = L2CAP_CONF_UNKNOWN;
2244 *((u8 *) ptr++) = type;
2245 break;
2246 }
2247 }
2248
2249 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2250 goto done;
2251
2252 switch (chan->mode) {
2253 case L2CAP_MODE_STREAMING:
2254 case L2CAP_MODE_ERTM:
2255 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2256 chan->mode = l2cap_select_mode(rfc.mode,
2257 chan->conn->feat_mask);
2258 break;
2259 }
2260
2261 if (remote_efs) {
2262 if (__l2cap_efs_supported(chan))
2263 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2264 else
2265 return -ECONNREFUSED;
2266 }
2267
2268 if (chan->mode != rfc.mode)
2269 return -ECONNREFUSED;
2270
2271 break;
2272 }
2273
2274 done:
2275 if (chan->mode != rfc.mode) {
2276 result = L2CAP_CONF_UNACCEPT;
2277 rfc.mode = chan->mode;
2278
2279 if (chan->num_conf_rsp == 1)
2280 return -ECONNREFUSED;
2281
2282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2283 sizeof(rfc), (unsigned long) &rfc);
2284 }
2285
2286 if (result == L2CAP_CONF_SUCCESS) {
2287 /* Configure output options and let the other side know
2288 * which ones we don't like. */
2289
2290 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2291 result = L2CAP_CONF_UNACCEPT;
2292 else {
2293 chan->omtu = mtu;
2294 set_bit(CONF_MTU_DONE, &chan->conf_state);
2295 }
2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2297
2298 if (remote_efs) {
2299 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2300 efs.stype != L2CAP_SERV_NOTRAFIC &&
2301 efs.stype != chan->local_stype) {
2302
2303 result = L2CAP_CONF_UNACCEPT;
2304
2305 if (chan->num_conf_req >= 1)
2306 return -ECONNREFUSED;
2307
2308 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2309 sizeof(efs),
2310 (unsigned long) &efs);
2311 } else {
2312 /* Send PENDING Conf Rsp */
2313 result = L2CAP_CONF_PENDING;
2314 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2315 }
2316 }
2317
2318 switch (rfc.mode) {
2319 case L2CAP_MODE_BASIC:
2320 chan->fcs = L2CAP_FCS_NONE;
2321 set_bit(CONF_MODE_DONE, &chan->conf_state);
2322 break;
2323
2324 case L2CAP_MODE_ERTM:
2325 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2326 chan->remote_tx_win = rfc.txwin_size;
2327 else
2328 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2329
2330 chan->remote_max_tx = rfc.max_transmit;
2331
2332 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2333 chan->conn->mtu -
2334 L2CAP_EXT_HDR_SIZE -
2335 L2CAP_SDULEN_SIZE -
2336 L2CAP_FCS_SIZE);
2337 rfc.max_pdu_size = cpu_to_le16(size);
2338 chan->remote_mps = size;
2339
2340 rfc.retrans_timeout =
2341 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2342 rfc.monitor_timeout =
2343 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2344
2345 set_bit(CONF_MODE_DONE, &chan->conf_state);
2346
2347 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2348 sizeof(rfc), (unsigned long) &rfc);
2349
2350 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2351 chan->remote_id = efs.id;
2352 chan->remote_stype = efs.stype;
2353 chan->remote_msdu = le16_to_cpu(efs.msdu);
2354 chan->remote_flush_to =
2355 le32_to_cpu(efs.flush_to);
2356 chan->remote_acc_lat =
2357 le32_to_cpu(efs.acc_lat);
2358 chan->remote_sdu_itime =
2359 le32_to_cpu(efs.sdu_itime);
2360 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2361 sizeof(efs), (unsigned long) &efs);
2362 }
2363 break;
2364
2365 case L2CAP_MODE_STREAMING:
2366 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2367 chan->conn->mtu -
2368 L2CAP_EXT_HDR_SIZE -
2369 L2CAP_SDULEN_SIZE -
2370 L2CAP_FCS_SIZE);
2371 rfc.max_pdu_size = cpu_to_le16(size);
2372 chan->remote_mps = size;
2373
2374 set_bit(CONF_MODE_DONE, &chan->conf_state);
2375
2376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2377 sizeof(rfc), (unsigned long) &rfc);
2378
2379 break;
2380
2381 default:
2382 result = L2CAP_CONF_UNACCEPT;
2383
2384 memset(&rfc, 0, sizeof(rfc));
2385 rfc.mode = chan->mode;
2386 }
2387
2388 if (result == L2CAP_CONF_SUCCESS)
2389 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2390 }
2391 rsp->scid = cpu_to_le16(chan->dcid);
2392 rsp->result = cpu_to_le16(result);
2393 rsp->flags = cpu_to_le16(0x0000);
2394
2395 return ptr - data;
2396 }
2397
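/* Process a Configure Response from the peer and build the follow-up
 * Configure Request into @data, echoing the values we are actually
 * going to use.  A response that conflicts with a locked-in mode or
 * an unacceptable EFS service type yields -ECONNREFUSED; otherwise
 * the negotiated ERTM/streaming parameters are stored in the channel
 * and the length of the new request is returned.
 */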
2398 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2399 {
2400 struct l2cap_conf_req *req = data;
2401 void *ptr = req->data;
2402 int type, olen;
2403 unsigned long val;
2404 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2405 struct l2cap_conf_efs efs;
2406
2407 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2408
2409 while (len >= L2CAP_CONF_OPT_SIZE) {
2410 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2411
2412 switch (type) {
2413 case L2CAP_CONF_MTU:
2414 if (val < L2CAP_DEFAULT_MIN_MTU) {
2415 *result = L2CAP_CONF_UNACCEPT;
2416 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2417 } else
2418 chan->imtu = val;
2419 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2420 break;
2421
2422 case L2CAP_CONF_FLUSH_TO:
2423 chan->flush_to = val;
2424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2425 2, chan->flush_to);
2426 break;
2427
2428 case L2CAP_CONF_RFC:
2429 if (olen == sizeof(rfc))
2430 memcpy(&rfc, (void *)val, olen);
2431
2432 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2433 rfc.mode != chan->mode)
2434 return -ECONNREFUSED;
2435
2436 chan->fcs = 0;
2437
2438 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2439 sizeof(rfc), (unsigned long) &rfc);
2440 break;
2441
2442 case L2CAP_CONF_EWS:
2443 chan->tx_win = min_t(u16, val,
2444 L2CAP_DEFAULT_EXT_WINDOW);
2445 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2446 chan->tx_win);
2447 break;
2448
2449 case L2CAP_CONF_EFS:
2450 if (olen == sizeof(efs))
2451 memcpy(&efs, (void *)val, olen);
2452
2453 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2454 efs.stype != L2CAP_SERV_NOTRAFIC &&
2455 efs.stype != chan->local_stype)
2456 return -ECONNREFUSED;
2457
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2459 sizeof(efs), (unsigned long) &efs);
2460 break;
2461 }
2462 }
2463
2464 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2465 return -ECONNREFUSED;
2466
2467 chan->mode = rfc.mode;
2468
2469 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2470 switch (rfc.mode) {
2471 case L2CAP_MODE_ERTM:
2472 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2473 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2474 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2475
2476 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2477 chan->local_msdu = le16_to_cpu(efs.msdu);
2478 chan->local_sdu_itime =
2479 le32_to_cpu(efs.sdu_itime);
2480 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2481 chan->local_flush_to =
2482 le32_to_cpu(efs.flush_to);
2483 }
2484 break;
2485
2486 case L2CAP_MODE_STREAMING:
2487 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2488 }
2489 }
2490
2491 req->dcid = cpu_to_le16(chan->dcid);
2492 req->flags = cpu_to_le16(0x0000);
2493
2494 return ptr - data;
2495 }
2496
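/* Build a bare Configure Response that carries only a result code and
 * the continuation flags, with no options attached.
 */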
2497 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2498 {
2499 struct l2cap_conf_rsp *rsp = data;
2500 void *ptr = rsp->data;
2501
2502 BT_DBG("chan %p", chan);
2503
2504 rsp->scid = cpu_to_le16(chan->dcid);
2505 rsp->result = cpu_to_le16(result);
2506 rsp->flags = cpu_to_le16(flags);
2507
2508 return ptr - data;
2509 }
2510
2511 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2512 {
2513 struct l2cap_conn_rsp rsp;
2514 struct l2cap_conn *conn = chan->conn;
2515 u8 buf[128];
2516
2517 rsp.scid = cpu_to_le16(chan->dcid);
2518 rsp.dcid = cpu_to_le16(chan->scid);
2519 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2520 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2521 l2cap_send_cmd(conn, chan->ident,
2522 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2523
2524 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2525 return;
2526
2527 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2528 l2cap_build_conf_req(chan, buf), buf);
2529 chan->num_conf_req++;
2530 }
2531
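/* Pull the RFC option out of a final Configure Response so the ERTM
 * timers and MPS can be applied to the channel.  If a misbehaving
 * remote omitted the option, sane defaults are used instead.
 */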
2532 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2533 {
2534 int type, olen;
2535 unsigned long val;
2536 struct l2cap_conf_rfc rfc;
2537
2538 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2539
2540 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2541 return;
2542
2543 while (len >= L2CAP_CONF_OPT_SIZE) {
2544 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2545
2546 switch (type) {
2547 case L2CAP_CONF_RFC:
2548 if (olen == sizeof(rfc))
2549 memcpy(&rfc, (void *)val, olen);
2550 goto done;
2551 }
2552 }
2553
2554 /* Use sane default values in case a misbehaving remote device
2555 * did not send an RFC option.
2556 */
2557 rfc.mode = chan->mode;
2558 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2559 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2560 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2561
2562 BT_ERR("Expected RFC option was not found, using defaults");
2563
2564 done:
2565 switch (rfc.mode) {
2566 case L2CAP_MODE_ERTM:
2567 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2568 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2569 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2570 break;
2571 case L2CAP_MODE_STREAMING:
2572 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2573 }
2574 }
2575
2576 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2577 {
2578 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2579
2580 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2581 return 0;
2582
2583 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2584 cmd->ident == conn->info_ident) {
2585 __cancel_delayed_work(&conn->info_timer);
2586
2587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2588 conn->info_ident = 0;
2589
2590 l2cap_conn_start(conn);
2591 }
2592
2593 return 0;
2594 }
2595
2596 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2597 {
2598 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2599 struct l2cap_conn_rsp rsp;
2600 struct l2cap_chan *chan = NULL, *pchan;
2601 struct sock *parent, *sk = NULL;
2602 int result, status = L2CAP_CS_NO_INFO;
2603
2604 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2605 __le16 psm = req->psm;
2606
2607 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2608
2609 /* Check if we have a socket listening on this PSM */
2610 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2611 if (!pchan) {
2612 result = L2CAP_CR_BAD_PSM;
2613 goto sendresp;
2614 }
2615
2616 parent = pchan->sk;
2617
2618 lock_sock(parent);
2619
2620 /* Check if the ACL is secure enough (if not SDP) */
2621 if (psm != cpu_to_le16(0x0001) &&
2622 !hci_conn_check_link_mode(conn->hcon)) {
2623 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2624 result = L2CAP_CR_SEC_BLOCK;
2625 goto response;
2626 }
2627
2628 result = L2CAP_CR_NO_MEM;
2629
2630 /* Check for backlog size */
2631 if (sk_acceptq_is_full(parent)) {
2632 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2633 goto response;
2634 }
2635
2636 chan = pchan->ops->new_connection(pchan->data);
2637 if (!chan)
2638 goto response;
2639
2640 sk = chan->sk;
2641
2642 /* Check if we already have a channel with that dcid */
2643 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2644 sock_set_flag(sk, SOCK_ZAPPED);
2645 chan->ops->close(chan->data);
2646 goto response;
2647 }
2648
2649 hci_conn_hold(conn->hcon);
2650
2651 bacpy(&bt_sk(sk)->src, conn->src);
2652 bacpy(&bt_sk(sk)->dst, conn->dst);
2653 chan->psm = psm;
2654 chan->dcid = scid;
2655
2656 bt_accept_enqueue(parent, sk);
2657
2658 l2cap_chan_add(conn, chan);
2659
2660 dcid = chan->scid;
2661
2662 __set_chan_timer(chan, sk->sk_sndtimeo);
2663
2664 chan->ident = cmd->ident;
2665
2666 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2667 if (l2cap_chan_check_security(chan)) {
2668 if (bt_sk(sk)->defer_setup) {
2669 l2cap_state_change(chan, BT_CONNECT2);
2670 result = L2CAP_CR_PEND;
2671 status = L2CAP_CS_AUTHOR_PEND;
2672 parent->sk_data_ready(parent, 0);
2673 } else {
2674 l2cap_state_change(chan, BT_CONFIG);
2675 result = L2CAP_CR_SUCCESS;
2676 status = L2CAP_CS_NO_INFO;
2677 }
2678 } else {
2679 l2cap_state_change(chan, BT_CONNECT2);
2680 result = L2CAP_CR_PEND;
2681 status = L2CAP_CS_AUTHEN_PEND;
2682 }
2683 } else {
2684 l2cap_state_change(chan, BT_CONNECT2);
2685 result = L2CAP_CR_PEND;
2686 status = L2CAP_CS_NO_INFO;
2687 }
2688
2689 response:
2690 release_sock(parent);
2691
2692 sendresp:
2693 rsp.scid = cpu_to_le16(scid);
2694 rsp.dcid = cpu_to_le16(dcid);
2695 rsp.result = cpu_to_le16(result);
2696 rsp.status = cpu_to_le16(status);
2697 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2698
2699 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2700 struct l2cap_info_req info;
2701 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2702
2703 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2704 conn->info_ident = l2cap_get_ident(conn);
2705
2706 schedule_delayed_work(&conn->info_timer,
2707 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2708
2709 l2cap_send_cmd(conn, conn->info_ident,
2710 L2CAP_INFO_REQ, sizeof(info), &info);
2711 }
2712
2713 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2714 result == L2CAP_CR_SUCCESS) {
2715 u8 buf[128];
2716 set_bit(CONF_REQ_SENT, &chan->conf_state);
2717 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2718 l2cap_build_conf_req(chan, buf), buf);
2719 chan->num_conf_req++;
2720 }
2721
2722 return 0;
2723 }
2724
2725 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2726 {
2727 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2728 u16 scid, dcid, result, status;
2729 struct l2cap_chan *chan;
2730 struct sock *sk;
2731 u8 req[128];
2732
2733 scid = __le16_to_cpu(rsp->scid);
2734 dcid = __le16_to_cpu(rsp->dcid);
2735 result = __le16_to_cpu(rsp->result);
2736 status = __le16_to_cpu(rsp->status);
2737
2738 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2739
2740 if (scid) {
2741 chan = l2cap_get_chan_by_scid(conn, scid);
2742 if (!chan)
2743 return -EFAULT;
2744 } else {
2745 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2746 if (!chan)
2747 return -EFAULT;
2748 }
2749
2750 sk = chan->sk;
2751
2752 switch (result) {
2753 case L2CAP_CR_SUCCESS:
2754 l2cap_state_change(chan, BT_CONFIG);
2755 chan->ident = 0;
2756 chan->dcid = dcid;
2757 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2758
2759 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2760 break;
2761
2762 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2763 l2cap_build_conf_req(chan, req), req);
2764 chan->num_conf_req++;
2765 break;
2766
2767 case L2CAP_CR_PEND:
2768 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2769 break;
2770
2771 default:
2772 l2cap_chan_del(chan, ECONNREFUSED);
2773 break;
2774 }
2775
2776 release_sock(sk);
2777 return 0;
2778 }
2779
2780 static inline void set_default_fcs(struct l2cap_chan *chan)
2781 {
2782 /* FCS is enabled only in ERTM or streaming mode, if one or both
2783 * sides request it.
2784 */
2785 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2786 chan->fcs = L2CAP_FCS_NONE;
2787 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2788 chan->fcs = L2CAP_FCS_CRC16;
2789 }
2790
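/* Handle an incoming Configure Request.  Fragments (continuation flag
 * set) are accumulated in chan->conf_req until the final fragment
 * arrives; only then is the complete request parsed and answered.
 */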
2791 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2792 {
2793 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2794 u16 dcid, flags;
2795 u8 rsp[64];
2796 struct l2cap_chan *chan;
2797 struct sock *sk;
2798 int len;
2799
2800 dcid = __le16_to_cpu(req->dcid);
2801 flags = __le16_to_cpu(req->flags);
2802
2803 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2804
2805 chan = l2cap_get_chan_by_scid(conn, dcid);
2806 if (!chan)
2807 return -ENOENT;
2808
2809 sk = chan->sk;
2810
2811 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2812 struct l2cap_cmd_rej_cid rej;
2813
2814 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2815 rej.scid = cpu_to_le16(chan->scid);
2816 rej.dcid = cpu_to_le16(chan->dcid);
2817
2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2819 sizeof(rej), &rej);
2820 goto unlock;
2821 }
2822
2823 /* Reject if config buffer is too small. */
2824 len = cmd_len - sizeof(*req);
2825 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2826 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2827 l2cap_build_conf_rsp(chan, rsp,
2828 L2CAP_CONF_REJECT, flags), rsp);
2829 goto unlock;
2830 }
2831
2832 /* Store config. */
2833 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2834 chan->conf_len += len;
2835
2836 if (flags & 0x0001) {
2837 /* Incomplete config. Send empty response. */
2838 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2839 l2cap_build_conf_rsp(chan, rsp,
2840 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2841 goto unlock;
2842 }
2843
2844 /* Complete config. */
2845 len = l2cap_parse_conf_req(chan, rsp);
2846 if (len < 0) {
2847 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2848 goto unlock;
2849 }
2850
2851 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2852 chan->num_conf_rsp++;
2853
2854 /* Reset config buffer. */
2855 chan->conf_len = 0;
2856
2857 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2858 goto unlock;
2859
2860 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2861 set_default_fcs(chan);
2862
2863 l2cap_state_change(chan, BT_CONNECTED);
2864
2865 chan->next_tx_seq = 0;
2866 chan->expected_tx_seq = 0;
2867 skb_queue_head_init(&chan->tx_q);
2868 if (chan->mode == L2CAP_MODE_ERTM)
2869 l2cap_ertm_init(chan);
2870
2871 l2cap_chan_ready(sk);
2872 goto unlock;
2873 }
2874
2875 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2876 u8 buf[64];
2877 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2878 l2cap_build_conf_req(chan, buf), buf);
2879 chan->num_conf_req++;
2880 }
2881
2882 /* Got Conf Rsp PENDING from the remote side and assume we sent
2883 Conf Rsp PENDING in the code above */
2884 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2885 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2886
2887 /* check compatibility */
2888
2889 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2890 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2891
2892 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2893 l2cap_build_conf_rsp(chan, rsp,
2894 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2895 }
2896
2897 unlock:
2898 release_sock(sk);
2899 return 0;
2900 }
2901
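/* Handle an incoming Configure Response.  SUCCESS and PENDING keep
 * the negotiation going, UNACCEPT triggers a limited number of
 * re-negotiation attempts using the values suggested by the peer, and
 * any other result tears the channel down.  Once both directions are
 * configured the channel moves to BT_CONNECTED.
 */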
2902 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2903 {
2904 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2905 u16 scid, flags, result;
2906 struct l2cap_chan *chan;
2907 struct sock *sk;
2908 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2909
2910 scid = __le16_to_cpu(rsp->scid);
2911 flags = __le16_to_cpu(rsp->flags);
2912 result = __le16_to_cpu(rsp->result);
2913
2914 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2915 scid, flags, result);
2916
2917 chan = l2cap_get_chan_by_scid(conn, scid);
2918 if (!chan)
2919 return 0;
2920
2921 sk = chan->sk;
2922
2923 switch (result) {
2924 case L2CAP_CONF_SUCCESS:
2925 l2cap_conf_rfc_get(chan, rsp->data, len);
2926 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2927 break;
2928
2929 case L2CAP_CONF_PENDING:
2930 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2931
2932 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2933 char buf[64];
2934
2935 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2936 buf, &result);
2937 if (len < 0) {
2938 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2939 goto done;
2940 }
2941
2942 /* check compatibility */
2943
2944 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2945 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2946
2947 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2948 l2cap_build_conf_rsp(chan, buf,
2949 L2CAP_CONF_SUCCESS, 0x0000), buf);
2950 }
2951 goto done;
2952
2953 case L2CAP_CONF_UNACCEPT:
2954 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2955 char req[64];
2956
2957 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2958 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2959 goto done;
2960 }
2961
2962 /* throw out any old stored conf requests */
2963 result = L2CAP_CONF_SUCCESS;
2964 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2965 req, &result);
2966 if (len < 0) {
2967 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2968 goto done;
2969 }
2970
2971 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2972 L2CAP_CONF_REQ, len, req);
2973 chan->num_conf_req++;
2974 if (result != L2CAP_CONF_SUCCESS)
2975 goto done;
2976 break;
2977 }
2978
2979 default:
2980 sk->sk_err = ECONNRESET;
2981 __set_chan_timer(chan,
2982 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2983 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2984 goto done;
2985 }
2986
2987 if (flags & 0x01)
2988 goto done;
2989
2990 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2991
2992 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2993 set_default_fcs(chan);
2994
2995 l2cap_state_change(chan, BT_CONNECTED);
2996 chan->next_tx_seq = 0;
2997 chan->expected_tx_seq = 0;
2998 skb_queue_head_init(&chan->tx_q);
2999 if (chan->mode == L2CAP_MODE_ERTM)
3000 l2cap_ertm_init(chan);
3001
3002 l2cap_chan_ready(sk);
3003 }
3004
3005 done:
3006 release_sock(sk);
3007 return 0;
3008 }
3009
3010 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3011 {
3012 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3013 struct l2cap_disconn_rsp rsp;
3014 u16 dcid, scid;
3015 struct l2cap_chan *chan;
3016 struct sock *sk;
3017
3018 scid = __le16_to_cpu(req->scid);
3019 dcid = __le16_to_cpu(req->dcid);
3020
3021 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3022
3023 chan = l2cap_get_chan_by_scid(conn, dcid);
3024 if (!chan)
3025 return 0;
3026
3027 sk = chan->sk;
3028
3029 rsp.dcid = cpu_to_le16(chan->scid);
3030 rsp.scid = cpu_to_le16(chan->dcid);
3031 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3032
3033 sk->sk_shutdown = SHUTDOWN_MASK;
3034
3035 l2cap_chan_del(chan, ECONNRESET);
3036 release_sock(sk);
3037
3038 chan->ops->close(chan->data);
3039 return 0;
3040 }
3041
3042 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3043 {
3044 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3045 u16 dcid, scid;
3046 struct l2cap_chan *chan;
3047 struct sock *sk;
3048
3049 scid = __le16_to_cpu(rsp->scid);
3050 dcid = __le16_to_cpu(rsp->dcid);
3051
3052 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3053
3054 chan = l2cap_get_chan_by_scid(conn, scid);
3055 if (!chan)
3056 return 0;
3057
3058 sk = chan->sk;
3059
3060 l2cap_chan_del(chan, 0);
3061 release_sock(sk);
3062
3063 chan->ops->close(chan->data);
3064 return 0;
3065 }
3066
3067 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3068 {
3069 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3070 u16 type;
3071
3072 type = __le16_to_cpu(req->type);
3073
3074 BT_DBG("type 0x%4.4x", type);
3075
3076 if (type == L2CAP_IT_FEAT_MASK) {
3077 u8 buf[8];
3078 u32 feat_mask = l2cap_feat_mask;
3079 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3080 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3081 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3082 if (!disable_ertm)
3083 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3084 | L2CAP_FEAT_FCS;
3085 if (enable_hs)
3086 feat_mask |= L2CAP_FEAT_EXT_FLOW
3087 | L2CAP_FEAT_EXT_WINDOW;
3088
3089 put_unaligned_le32(feat_mask, rsp->data);
3090 l2cap_send_cmd(conn, cmd->ident,
3091 L2CAP_INFO_RSP, sizeof(buf), buf);
3092 } else if (type == L2CAP_IT_FIXED_CHAN) {
3093 u8 buf[12];
3094 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3095
3096 if (enable_hs)
3097 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3098 else
3099 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3100
3101 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3102 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3103 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3104 l2cap_send_cmd(conn, cmd->ident,
3105 L2CAP_INFO_RSP, sizeof(buf), buf);
3106 } else {
3107 struct l2cap_info_rsp rsp;
3108 rsp.type = cpu_to_le16(type);
3109 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3110 l2cap_send_cmd(conn, cmd->ident,
3111 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3112 }
3113
3114 return 0;
3115 }
3116
3117 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3118 {
3119 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3120 u16 type, result;
3121
3122 type = __le16_to_cpu(rsp->type);
3123 result = __le16_to_cpu(rsp->result);
3124
3125 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3126
3127 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
3128 if (cmd->ident != conn->info_ident ||
3129 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3130 return 0;
3131
3132 __cancel_delayed_work(&conn->info_timer);
3133
3134 if (result != L2CAP_IR_SUCCESS) {
3135 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3136 conn->info_ident = 0;
3137
3138 l2cap_conn_start(conn);
3139
3140 return 0;
3141 }
3142
3143 if (type == L2CAP_IT_FEAT_MASK) {
3144 conn->feat_mask = get_unaligned_le32(rsp->data);
3145
3146 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3147 struct l2cap_info_req req;
3148 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3149
3150 conn->info_ident = l2cap_get_ident(conn);
3151
3152 l2cap_send_cmd(conn, conn->info_ident,
3153 L2CAP_INFO_REQ, sizeof(req), &req);
3154 } else {
3155 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3156 conn->info_ident = 0;
3157
3158 l2cap_conn_start(conn);
3159 }
3160 } else if (type == L2CAP_IT_FIXED_CHAN) {
3161 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3162 conn->info_ident = 0;
3163
3164 l2cap_conn_start(conn);
3165 }
3166
3167 return 0;
3168 }
3169
3170 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3171 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3172 void *data)
3173 {
3174 struct l2cap_create_chan_req *req = data;
3175 struct l2cap_create_chan_rsp rsp;
3176 u16 psm, scid;
3177
3178 if (cmd_len != sizeof(*req))
3179 return -EPROTO;
3180
3181 if (!enable_hs)
3182 return -EINVAL;
3183
3184 psm = le16_to_cpu(req->psm);
3185 scid = le16_to_cpu(req->scid);
3186
3187 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3188
3189 /* Placeholder: Always reject */
3190 rsp.dcid = 0;
3191 rsp.scid = cpu_to_le16(scid);
3192 rsp.result = L2CAP_CR_NO_MEM;
3193 rsp.status = L2CAP_CS_NO_INFO;
3194
3195 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3196 sizeof(rsp), &rsp);
3197
3198 return 0;
3199 }
3200
3201 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3202 struct l2cap_cmd_hdr *cmd, void *data)
3203 {
3204 BT_DBG("conn %p", conn);
3205
3206 return l2cap_connect_rsp(conn, cmd, data);
3207 }
3208
3209 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3210 u16 icid, u16 result)
3211 {
3212 struct l2cap_move_chan_rsp rsp;
3213
3214 BT_DBG("icid %d, result %d", icid, result);
3215
3216 rsp.icid = cpu_to_le16(icid);
3217 rsp.result = cpu_to_le16(result);
3218
3219 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3220 }
3221
3222 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3223 struct l2cap_chan *chan, u16 icid, u16 result)
3224 {
3225 struct l2cap_move_chan_cfm cfm;
3226 u8 ident;
3227
3228 BT_DBG("icid %d, result %d", icid, result);
3229
3230 ident = l2cap_get_ident(conn);
3231 if (chan)
3232 chan->ident = ident;
3233
3234 cfm.icid = cpu_to_le16(icid);
3235 cfm.result = cpu_to_le16(result);
3236
3237 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3238 }
3239
3240 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3241 u16 icid)
3242 {
3243 struct l2cap_move_chan_cfm_rsp rsp;
3244
3245 BT_DBG("icid %d", icid);
3246
3247 rsp.icid = cpu_to_le16(icid);
3248 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3249 }
3250
3251 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3252 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3253 {
3254 struct l2cap_move_chan_req *req = data;
3255 u16 icid = 0;
3256 u16 result = L2CAP_MR_NOT_ALLOWED;
3257
3258 if (cmd_len != sizeof(*req))
3259 return -EPROTO;
3260
3261 icid = le16_to_cpu(req->icid);
3262
3263 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3264
3265 if (!enable_hs)
3266 return -EINVAL;
3267
3268 /* Placeholder: Always refuse */
3269 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3270
3271 return 0;
3272 }
3273
3274 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3275 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3276 {
3277 struct l2cap_move_chan_rsp *rsp = data;
3278 u16 icid, result;
3279
3280 if (cmd_len != sizeof(*rsp))
3281 return -EPROTO;
3282
3283 icid = le16_to_cpu(rsp->icid);
3284 result = le16_to_cpu(rsp->result);
3285
3286 BT_DBG("icid %d, result %d", icid, result);
3287
3288 /* Placeholder: Always unconfirmed */
3289 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3290
3291 return 0;
3292 }
3293
3294 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3295 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3296 {
3297 struct l2cap_move_chan_cfm *cfm = data;
3298 u16 icid, result;
3299
3300 if (cmd_len != sizeof(*cfm))
3301 return -EPROTO;
3302
3303 icid = le16_to_cpu(cfm->icid);
3304 result = le16_to_cpu(cfm->result);
3305
3306 BT_DBG("icid %d, result %d", icid, result);
3307
3308 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3309
3310 return 0;
3311 }
3312
3313 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3314 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3315 {
3316 struct l2cap_move_chan_cfm_rsp *rsp = data;
3317 u16 icid;
3318
3319 if (cmd_len != sizeof(*rsp))
3320 return -EPROTO;
3321
3322 icid = le16_to_cpu(rsp->icid);
3323
3324 BT_DBG("icid %d", icid);
3325
3326 return 0;
3327 }
3328
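/* Sanity-check LE connection parameters requested by a peripheral.
 * The limits follow the core specification: connection interval
 * 7.5 ms - 4 s (units of 1.25 ms), supervision timeout 100 ms - 32 s
 * (units of 10 ms), slave latency at most 499 and small enough that
 * the supervision timeout still covers the effective interval.
 */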
3329 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3330 u16 to_multiplier)
3331 {
3332 u16 max_latency;
3333
3334 if (min > max || min < 6 || max > 3200)
3335 return -EINVAL;
3336
3337 if (to_multiplier < 10 || to_multiplier > 3200)
3338 return -EINVAL;
3339
3340 if (max >= to_multiplier * 8)
3341 return -EINVAL;
3342
3343 max_latency = (to_multiplier * 8 / max) - 1;
3344 if (latency > 499 || latency > max_latency)
3345 return -EINVAL;
3346
3347 return 0;
3348 }
3349
3350 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3351 struct l2cap_cmd_hdr *cmd, u8 *data)
3352 {
3353 struct hci_conn *hcon = conn->hcon;
3354 struct l2cap_conn_param_update_req *req;
3355 struct l2cap_conn_param_update_rsp rsp;
3356 u16 min, max, latency, to_multiplier, cmd_len;
3357 int err;
3358
3359 if (!(hcon->link_mode & HCI_LM_MASTER))
3360 return -EINVAL;
3361
3362 cmd_len = __le16_to_cpu(cmd->len);
3363 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3364 return -EPROTO;
3365
3366 req = (struct l2cap_conn_param_update_req *) data;
3367 min = __le16_to_cpu(req->min);
3368 max = __le16_to_cpu(req->max);
3369 latency = __le16_to_cpu(req->latency);
3370 to_multiplier = __le16_to_cpu(req->to_multiplier);
3371
3372 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3373 min, max, latency, to_multiplier);
3374
3375 memset(&rsp, 0, sizeof(rsp));
3376
3377 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3378 if (err)
3379 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3380 else
3381 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3382
3383 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3384 sizeof(rsp), &rsp);
3385
3386 if (!err)
3387 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3388
3389 return 0;
3390 }
3391
3392 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3393 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3394 {
3395 int err = 0;
3396
3397 switch (cmd->code) {
3398 case L2CAP_COMMAND_REJ:
3399 l2cap_command_rej(conn, cmd, data);
3400 break;
3401
3402 case L2CAP_CONN_REQ:
3403 err = l2cap_connect_req(conn, cmd, data);
3404 break;
3405
3406 case L2CAP_CONN_RSP:
3407 err = l2cap_connect_rsp(conn, cmd, data);
3408 break;
3409
3410 case L2CAP_CONF_REQ:
3411 err = l2cap_config_req(conn, cmd, cmd_len, data);
3412 break;
3413
3414 case L2CAP_CONF_RSP:
3415 err = l2cap_config_rsp(conn, cmd, data);
3416 break;
3417
3418 case L2CAP_DISCONN_REQ:
3419 err = l2cap_disconnect_req(conn, cmd, data);
3420 break;
3421
3422 case L2CAP_DISCONN_RSP:
3423 err = l2cap_disconnect_rsp(conn, cmd, data);
3424 break;
3425
3426 case L2CAP_ECHO_REQ:
3427 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3428 break;
3429
3430 case L2CAP_ECHO_RSP:
3431 break;
3432
3433 case L2CAP_INFO_REQ:
3434 err = l2cap_information_req(conn, cmd, data);
3435 break;
3436
3437 case L2CAP_INFO_RSP:
3438 err = l2cap_information_rsp(conn, cmd, data);
3439 break;
3440
3441 case L2CAP_CREATE_CHAN_REQ:
3442 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3443 break;
3444
3445 case L2CAP_CREATE_CHAN_RSP:
3446 err = l2cap_create_channel_rsp(conn, cmd, data);
3447 break;
3448
3449 case L2CAP_MOVE_CHAN_REQ:
3450 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3451 break;
3452
3453 case L2CAP_MOVE_CHAN_RSP:
3454 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3455 break;
3456
3457 case L2CAP_MOVE_CHAN_CFM:
3458 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3459 break;
3460
3461 case L2CAP_MOVE_CHAN_CFM_RSP:
3462 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3463 break;
3464
3465 default:
3466 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3467 err = -EINVAL;
3468 break;
3469 }
3470
3471 return err;
3472 }
3473
3474 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3475 struct l2cap_cmd_hdr *cmd, u8 *data)
3476 {
3477 switch (cmd->code) {
3478 case L2CAP_COMMAND_REJ:
3479 return 0;
3480
3481 case L2CAP_CONN_PARAM_UPDATE_REQ:
3482 return l2cap_conn_param_update_req(conn, cmd, data);
3483
3484 case L2CAP_CONN_PARAM_UPDATE_RSP:
3485 return 0;
3486
3487 default:
3488 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3489 return -EINVAL;
3490 }
3491 }
3492
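/* Walk all signalling commands packed into a single C-frame on the
 * signalling channel and dispatch each one to the BR/EDR or LE
 * handler.  A command whose handler fails is answered with a Command
 * Reject; a truncated command aborts the walk.
 */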
3493 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3494 struct sk_buff *skb)
3495 {
3496 u8 *data = skb->data;
3497 int len = skb->len;
3498 struct l2cap_cmd_hdr cmd;
3499 int err;
3500
3501 l2cap_raw_recv(conn, skb);
3502
3503 while (len >= L2CAP_CMD_HDR_SIZE) {
3504 u16 cmd_len;
3505 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3506 data += L2CAP_CMD_HDR_SIZE;
3507 len -= L2CAP_CMD_HDR_SIZE;
3508
3509 cmd_len = le16_to_cpu(cmd.len);
3510
3511 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3512
3513 if (cmd_len > len || !cmd.ident) {
3514 BT_DBG("corrupted command");
3515 break;
3516 }
3517
3518 if (conn->hcon->type == LE_LINK)
3519 err = l2cap_le_sig_cmd(conn, &cmd, data);
3520 else
3521 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3522
3523 if (err) {
3524 struct l2cap_cmd_rej_unk rej;
3525
3526 BT_ERR("Wrong link type (%d)", err);
3527
3528 /* FIXME: Map err to a valid reason */
3529 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3530 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3531 }
3532
3533 data += cmd_len;
3534 len -= cmd_len;
3535 }
3536
3537 kfree_skb(skb);
3538 }
3539
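/* Verify the CRC16 FCS of a received frame.  The checksum covers the
 * basic L2CAP header, the control field and the payload; skb_trim()
 * drops the trailing FCS from skb->len while the two FCS bytes remain
 * readable in the buffer for the comparison below.
 */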
3540 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3541 {
3542 u16 our_fcs, rcv_fcs;
3543 int hdr_size;
3544
3545 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3546 hdr_size = L2CAP_EXT_HDR_SIZE;
3547 else
3548 hdr_size = L2CAP_ENH_HDR_SIZE;
3549
3550 if (chan->fcs == L2CAP_FCS_CRC16) {
3551 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3552 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3553 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3554
3555 if (our_fcs != rcv_fcs)
3556 return -EBADMSG;
3557 }
3558 return 0;
3559 }
3560
3561 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3562 {
3563 u32 control = 0;
3564
3565 chan->frames_sent = 0;
3566
3567 control |= __set_reqseq(chan, chan->buffer_seq);
3568
3569 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3570 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3571 l2cap_send_sframe(chan, control);
3572 set_bit(CONN_RNR_SENT, &chan->conn_state);
3573 }
3574
3575 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3576 l2cap_retransmit_frames(chan);
3577
3578 l2cap_ertm_send(chan);
3579
3580 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3581 chan->frames_sent == 0) {
3582 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3583 l2cap_send_sframe(chan, control);
3584 }
3585 }
3586
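/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by how far each tx_seq is ahead of buffer_seq.  A
 * duplicate tx_seq is rejected with -EINVAL.
 */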
3587 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3588 {
3589 struct sk_buff *next_skb;
3590 int tx_seq_offset, next_tx_seq_offset;
3591
3592 bt_cb(skb)->tx_seq = tx_seq;
3593 bt_cb(skb)->sar = sar;
3594
3595 next_skb = skb_peek(&chan->srej_q);
3596
3597 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3598
3599 while (next_skb) {
3600 if (bt_cb(next_skb)->tx_seq == tx_seq)
3601 return -EINVAL;
3602
3603 next_tx_seq_offset = __seq_offset(chan,
3604 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3605
3606 if (next_tx_seq_offset > tx_seq_offset) {
3607 __skb_queue_before(&chan->srej_q, next_skb, skb);
3608 return 0;
3609 }
3610
3611 if (skb_queue_is_last(&chan->srej_q, next_skb))
3612 next_skb = NULL;
3613 else
3614 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3615 }
3616
3617 __skb_queue_tail(&chan->srej_q, skb);
3618
3619 return 0;
3620 }
3621
3622 static void append_skb_frag(struct sk_buff *skb,
3623 struct sk_buff *new_frag, struct sk_buff **last_frag)
3624 {
3625 /* skb->len reflects data in skb as well as all fragments
3626 * skb->data_len reflects only data in fragments
3627 */
3628 if (!skb_has_frag_list(skb))
3629 skb_shinfo(skb)->frag_list = new_frag;
3630
3631 new_frag->next = NULL;
3632
3633 (*last_frag)->next = new_frag;
3634 *last_frag = new_frag;
3635
3636 skb->len += new_frag->len;
3637 skb->data_len += new_frag->len;
3638 skb->truesize += new_frag->truesize;
3639 }
3640
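/* Reassemble a segmented SDU from its SAR-tagged I-frames.  A start
 * frame carries the total SDU length, continuation and end frames are
 * chained onto chan->sdu as fragments, and the completed SDU is handed
 * to the channel's recv callback.  On error both the current frame and
 * any partial SDU are freed.
 */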
3641 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3642 {
3643 int err = -EINVAL;
3644
3645 switch (__get_ctrl_sar(chan, control)) {
3646 case L2CAP_SAR_UNSEGMENTED:
3647 if (chan->sdu)
3648 break;
3649
3650 err = chan->ops->recv(chan->data, skb);
3651 break;
3652
3653 case L2CAP_SAR_START:
3654 if (chan->sdu)
3655 break;
3656
3657 chan->sdu_len = get_unaligned_le16(skb->data);
3658 skb_pull(skb, L2CAP_SDULEN_SIZE);
3659
3660 if (chan->sdu_len > chan->imtu) {
3661 err = -EMSGSIZE;
3662 break;
3663 }
3664
3665 if (skb->len >= chan->sdu_len)
3666 break;
3667
3668 chan->sdu = skb;
3669 chan->sdu_last_frag = skb;
3670
3671 skb = NULL;
3672 err = 0;
3673 break;
3674
3675 case L2CAP_SAR_CONTINUE:
3676 if (!chan->sdu)
3677 break;
3678
3679 append_skb_frag(chan->sdu, skb,
3680 &chan->sdu_last_frag);
3681 skb = NULL;
3682
3683 if (chan->sdu->len >= chan->sdu_len)
3684 break;
3685
3686 err = 0;
3687 break;
3688
3689 case L2CAP_SAR_END:
3690 if (!chan->sdu)
3691 break;
3692
3693 append_skb_frag(chan->sdu, skb,
3694 &chan->sdu_last_frag);
3695 skb = NULL;
3696
3697 if (chan->sdu->len != chan->sdu_len)
3698 break;
3699
3700 err = chan->ops->recv(chan->data, chan->sdu);
3701
3702 if (!err) {
3703 /* Reassembly complete */
3704 chan->sdu = NULL;
3705 chan->sdu_last_frag = NULL;
3706 chan->sdu_len = 0;
3707 }
3708 break;
3709 }
3710
3711 if (err) {
3712 kfree_skb(skb);
3713 kfree_skb(chan->sdu);
3714 chan->sdu = NULL;
3715 chan->sdu_last_frag = NULL;
3716 chan->sdu_len = 0;
3717 }
3718
3719 return err;
3720 }
3721
3722 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3723 {
3724 BT_DBG("chan %p, Enter local busy", chan);
3725
3726 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3727
3728 __set_ack_timer(chan);
3729 }
3730
3731 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3732 {
3733 u32 control;
3734
3735 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3736 goto done;
3737
3738 control = __set_reqseq(chan, chan->buffer_seq);
3739 control |= __set_ctrl_poll(chan);
3740 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3741 l2cap_send_sframe(chan, control);
3742 chan->retry_count = 1;
3743
3744 __clear_retrans_timer(chan);
3745 __set_monitor_timer(chan);
3746
3747 set_bit(CONN_WAIT_F, &chan->conn_state);
3748
3749 done:
3750 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3751 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3752
3753 BT_DBG("chan %p, Exit local busy", chan);
3754 }
3755
3756 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3757 {
3758 if (chan->mode == L2CAP_MODE_ERTM) {
3759 if (busy)
3760 l2cap_ertm_enter_local_busy(chan);
3761 else
3762 l2cap_ertm_exit_local_busy(chan);
3763 }
3764 }
3765
3766 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3767 {
3768 struct sk_buff *skb;
3769 u32 control;
3770
3771 while ((skb = skb_peek(&chan->srej_q)) &&
3772 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3773 int err;
3774
3775 if (bt_cb(skb)->tx_seq != tx_seq)
3776 break;
3777
3778 skb = skb_dequeue(&chan->srej_q);
3779 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3780 err = l2cap_reassemble_sdu(chan, skb, control);
3781
3782 if (err < 0) {
3783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3784 break;
3785 }
3786
3787 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3788 tx_seq = __next_seq(chan, tx_seq);
3789 }
3790 }
3791
3792 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3793 {
3794 struct srej_list *l, *tmp;
3795 u32 control;
3796
3797 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3798 if (l->tx_seq == tx_seq) {
3799 list_del(&l->list);
3800 kfree(l);
3801 return;
3802 }
3803 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3804 control |= __set_reqseq(chan, l->tx_seq);
3805 l2cap_send_sframe(chan, control);
3806 list_del(&l->list);
3807 list_add_tail(&l->list, &chan->srej_l);
3808 }
3809 }
3810
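/* Send an SREJ S-frame for every sequence number between the expected
 * tx_seq and the one just received, remembering each requested frame
 * on the srej_l list until it has been retransmitted.
 */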
3811 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3812 {
3813 struct srej_list *new;
3814 u32 control;
3815
3816 while (tx_seq != chan->expected_tx_seq) {
3817 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3818 control |= __set_reqseq(chan, chan->expected_tx_seq);
3819 l2cap_send_sframe(chan, control);
3820
3821 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3822 if (!new)
3823 return -ENOMEM;
3824
3825 new->tx_seq = chan->expected_tx_seq;
3826
3827 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3828
3829 list_add_tail(&new->list, &chan->srej_l);
3830 }
3831
3832 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3833
3834 return 0;
3835 }
3836
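/* Receive-side handling of an ERTM I-frame: frames acknowledged by
 * req_seq are dropped from the transmit queue, out-of-sequence frames
 * are queued for SREJ recovery, and in-sequence frames are passed on
 * for SDU reassembly.  An acknowledgement is forced after roughly
 * tx_win/6 in-sequence frames; otherwise the ack timer handles it.
 */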
3837 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3838 {
3839 u16 tx_seq = __get_txseq(chan, rx_control);
3840 u16 req_seq = __get_reqseq(chan, rx_control);
3841 u8 sar = __get_ctrl_sar(chan, rx_control);
3842 int tx_seq_offset, expected_tx_seq_offset;
3843 int num_to_ack = (chan->tx_win/6) + 1;
3844 int err = 0;
3845
3846 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3847 tx_seq, rx_control);
3848
3849 if (__is_ctrl_final(chan, rx_control) &&
3850 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3851 __clear_monitor_timer(chan);
3852 if (chan->unacked_frames > 0)
3853 __set_retrans_timer(chan);
3854 clear_bit(CONN_WAIT_F, &chan->conn_state);
3855 }
3856
3857 chan->expected_ack_seq = req_seq;
3858 l2cap_drop_acked_frames(chan);
3859
3860 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3861
3862 /* invalid tx_seq */
3863 if (tx_seq_offset >= chan->tx_win) {
3864 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3865 goto drop;
3866 }
3867
3868 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3869 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3870 l2cap_send_ack(chan);
3871 goto drop;
3872 }
3873
3874 if (tx_seq == chan->expected_tx_seq)
3875 goto expected;
3876
3877 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3878 struct srej_list *first;
3879
3880 first = list_first_entry(&chan->srej_l,
3881 struct srej_list, list);
3882 if (tx_seq == first->tx_seq) {
3883 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3884 l2cap_check_srej_gap(chan, tx_seq);
3885
3886 list_del(&first->list);
3887 kfree(first);
3888
3889 if (list_empty(&chan->srej_l)) {
3890 chan->buffer_seq = chan->buffer_seq_srej;
3891 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3892 l2cap_send_ack(chan);
3893 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3894 }
3895 } else {
3896 struct srej_list *l;
3897
3898 /* duplicated tx_seq */
3899 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3900 goto drop;
3901
3902 list_for_each_entry(l, &chan->srej_l, list) {
3903 if (l->tx_seq == tx_seq) {
3904 l2cap_resend_srejframe(chan, tx_seq);
3905 return 0;
3906 }
3907 }
3908
3909 err = l2cap_send_srejframe(chan, tx_seq);
3910 if (err < 0) {
3911 l2cap_send_disconn_req(chan->conn, chan, -err);
3912 return err;
3913 }
3914 }
3915 } else {
3916 expected_tx_seq_offset = __seq_offset(chan,
3917 chan->expected_tx_seq, chan->buffer_seq);
3918
3919 /* duplicated tx_seq */
3920 if (tx_seq_offset < expected_tx_seq_offset)
3921 goto drop;
3922
3923 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3924
3925 BT_DBG("chan %p, Enter SREJ", chan);
3926
3927 INIT_LIST_HEAD(&chan->srej_l);
3928 chan->buffer_seq_srej = chan->buffer_seq;
3929
3930 __skb_queue_head_init(&chan->srej_q);
3931 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3932
3933 /* Set P-bit only if there are some I-frames to ack. */
3934 if (__clear_ack_timer(chan))
3935 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3936
3937 err = l2cap_send_srejframe(chan, tx_seq);
3938 if (err < 0) {
3939 l2cap_send_disconn_req(chan->conn, chan, -err);
3940 return err;
3941 }
3942 }
3943 return 0;
3944
3945 expected:
3946 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3947
3948 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3949 bt_cb(skb)->tx_seq = tx_seq;
3950 bt_cb(skb)->sar = sar;
3951 __skb_queue_tail(&chan->srej_q, skb);
3952 return 0;
3953 }
3954
3955 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3956 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3957
3958 if (err < 0) {
3959 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3960 return err;
3961 }
3962
3963 if (__is_ctrl_final(chan, rx_control)) {
3964 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3965 l2cap_retransmit_frames(chan);
3966 }
3967
3968
3969 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3970 if (chan->num_acked == num_to_ack - 1)
3971 l2cap_send_ack(chan);
3972 else
3973 __set_ack_timer(chan);
3974
3975 return 0;
3976
3977 drop:
3978 kfree_skb(skb);
3979 return 0;
3980 }
3981
3982 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3983 {
3984 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3985 __get_reqseq(chan, rx_control), rx_control);
3986
3987 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3988 l2cap_drop_acked_frames(chan);
3989
3990 if (__is_ctrl_poll(chan, rx_control)) {
3991 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3992 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3993 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3994 (chan->unacked_frames > 0))
3995 __set_retrans_timer(chan);
3996
3997 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3998 l2cap_send_srejtail(chan);
3999 } else {
4000 l2cap_send_i_or_rr_or_rnr(chan);
4001 }
4002
4003 } else if (__is_ctrl_final(chan, rx_control)) {
4004 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4005
4006 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4007 l2cap_retransmit_frames(chan);
4008
4009 } else {
4010 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4011 (chan->unacked_frames > 0))
4012 __set_retrans_timer(chan);
4013
4014 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4015 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4016 l2cap_send_ack(chan);
4017 else
4018 l2cap_ertm_send(chan);
4019 }
4020 }
4021
4022 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4023 {
4024 u16 tx_seq = __get_reqseq(chan, rx_control);
4025
4026 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4027
4028 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4029
4030 chan->expected_ack_seq = tx_seq;
4031 l2cap_drop_acked_frames(chan);
4032
4033 if (__is_ctrl_final(chan, rx_control)) {
4034 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4035 l2cap_retransmit_frames(chan);
4036 } else {
4037 l2cap_retransmit_frames(chan);
4038
4039 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4040 set_bit(CONN_REJ_ACT, &chan->conn_state);
4041 }
4042 }
4043 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4044 {
4045 u16 tx_seq = __get_reqseq(chan, rx_control);
4046
4047 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4048
4049 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4050
4051 if (__is_ctrl_poll(chan, rx_control)) {
4052 chan->expected_ack_seq = tx_seq;
4053 l2cap_drop_acked_frames(chan);
4054
4055 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4056 l2cap_retransmit_one_frame(chan, tx_seq);
4057
4058 l2cap_ertm_send(chan);
4059
4060 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4061 chan->srej_save_reqseq = tx_seq;
4062 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4063 }
4064 } else if (__is_ctrl_final(chan, rx_control)) {
4065 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4066 chan->srej_save_reqseq == tx_seq)
4067 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4068 else
4069 l2cap_retransmit_one_frame(chan, tx_seq);
4070 } else {
4071 l2cap_retransmit_one_frame(chan, tx_seq);
4072 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4073 chan->srej_save_reqseq = tx_seq;
4074 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4075 }
4076 }
4077 }
4078
4079 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4080 {
4081 u16 tx_seq = __get_reqseq(chan, rx_control);
4082
4083 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4084
4085 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4086 chan->expected_ack_seq = tx_seq;
4087 l2cap_drop_acked_frames(chan);
4088
4089 if (__is_ctrl_poll(chan, rx_control))
4090 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4091
4092 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4093 __clear_retrans_timer(chan);
4094 if (__is_ctrl_poll(chan, rx_control))
4095 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4096 return;
4097 }
4098
4099 if (__is_ctrl_poll(chan, rx_control)) {
4100 l2cap_send_srejtail(chan);
4101 } else {
4102 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4103 l2cap_send_sframe(chan, rx_control);
4104 }
4105 }
4106
4107 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4108 {
4109 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4110
4111 if (__is_ctrl_final(chan, rx_control) &&
4112 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4113 __clear_monitor_timer(chan);
4114 if (chan->unacked_frames > 0)
4115 __set_retrans_timer(chan);
4116 clear_bit(CONN_WAIT_F, &chan->conn_state);
4117 }
4118
4119 switch (__get_ctrl_super(chan, rx_control)) {
4120 case L2CAP_SUPER_RR:
4121 l2cap_data_channel_rrframe(chan, rx_control);
4122 break;
4123
4124 case L2CAP_SUPER_REJ:
4125 l2cap_data_channel_rejframe(chan, rx_control);
4126 break;
4127
4128 case L2CAP_SUPER_SREJ:
4129 l2cap_data_channel_srejframe(chan, rx_control);
4130 break;
4131
4132 case L2CAP_SUPER_RNR:
4133 l2cap_data_channel_rnrframe(chan, rx_control);
4134 break;
4135 }
4136
4137 kfree_skb(skb);
4138 return 0;
4139 }
4140
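/* Entry point for ERTM traffic on a connected channel: strip the
 * control field, check the FCS, frame length and req_seq, then
 * dispatch to the I-frame or S-frame handlers above.
 */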
4141 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4142 {
4143 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4144 u32 control;
4145 u16 req_seq;
4146 int len, next_tx_seq_offset, req_seq_offset;
4147
4148 control = __get_control(chan, skb->data);
4149 skb_pull(skb, __ctrl_size(chan));
4150 len = skb->len;
4151
4152 /*
4153 * We can just drop the corrupted I-frame here.
4154 * The receive side will notice the missing frame and the normal
4155 * recovery procedure will request its retransmission.
4156 */
4157 if (l2cap_check_fcs(chan, skb))
4158 goto drop;
4159
4160 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4161 len -= L2CAP_SDULEN_SIZE;
4162
4163 if (chan->fcs == L2CAP_FCS_CRC16)
4164 len -= L2CAP_FCS_SIZE;
4165
4166 if (len > chan->mps) {
4167 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4168 goto drop;
4169 }
4170
4171 req_seq = __get_reqseq(chan, control);
4172
4173 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4174
4175 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4176 chan->expected_ack_seq);
4177
4178 /* check for invalid req-seq */
4179 if (req_seq_offset > next_tx_seq_offset) {
4180 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4181 goto drop;
4182 }
4183
4184 if (!__is_sframe(chan, control)) {
4185 if (len < 0) {
4186 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4187 goto drop;
4188 }
4189
4190 l2cap_data_channel_iframe(chan, control, skb);
4191 } else {
4192 if (len != 0) {
4193 BT_ERR("%d", len);
4194 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4195 goto drop;
4196 }
4197
4198 l2cap_data_channel_sframe(chan, control, skb);
4199 }
4200
4201 return 0;
4202
4203 drop:
4204 kfree_skb(skb);
4205 return 0;
4206 }
4207
4208 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4209 {
4210 struct l2cap_chan *chan;
4211 struct sock *sk = NULL;
4212 u32 control;
4213 u16 tx_seq;
4214 int len;
4215
4216 chan = l2cap_get_chan_by_scid(conn, cid);
4217 if (!chan) {
4218 BT_DBG("unknown cid 0x%4.4x", cid);
4219 goto drop;
4220 }
4221
4222 sk = chan->sk;
4223
4224 BT_DBG("chan %p, len %d", chan, skb->len);
4225
4226 if (chan->state != BT_CONNECTED)
4227 goto drop;
4228
4229 switch (chan->mode) {
4230 case L2CAP_MODE_BASIC:
4231 /* If the socket receive buffer overflows we drop data here,
4232 * which is *bad* because L2CAP has to be reliable.
4233 * But we don't have any other choice: L2CAP basic mode
4234 * doesn't provide a flow control mechanism. */
4235
4236 if (chan->imtu < skb->len)
4237 goto drop;
4238
4239 if (!chan->ops->recv(chan->data, skb))
4240 goto done;
4241 break;
4242
4243 case L2CAP_MODE_ERTM:
4244 l2cap_ertm_data_rcv(sk, skb);
4245
4246 goto done;
4247
4248 case L2CAP_MODE_STREAMING:
4249 control = __get_control(chan, skb->data);
4250 skb_pull(skb, __ctrl_size(chan));
4251 len = skb->len;
4252
4253 if (l2cap_check_fcs(chan, skb))
4254 goto drop;
4255
4256 if (__is_sar_start(chan, control))
4257 len -= L2CAP_SDULEN_SIZE;
4258
4259 if (chan->fcs == L2CAP_FCS_CRC16)
4260 len -= L2CAP_FCS_SIZE;
4261
4262 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4263 goto drop;
4264
4265 tx_seq = __get_txseq(chan, control);
4266
4267 if (chan->expected_tx_seq != tx_seq) {
4268 /* Frame(s) missing - must discard partial SDU */
4269 kfree_skb(chan->sdu);
4270 chan->sdu = NULL;
4271 chan->sdu_last_frag = NULL;
4272 chan->sdu_len = 0;
4273
4274 /* TODO: Notify userland of missing data */
4275 }
4276
4277 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4278
4279 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4281
4282 goto done;
4283
4284 default:
4285 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4286 break;
4287 }
4288
4289 drop:
4290 kfree_skb(skb);
4291
4292 done:
4293 if (sk)
4294 release_sock(sk);
4295
4296 return 0;
4297 }
4298
4299 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4300 {
4301 struct sock *sk = NULL;
4302 struct l2cap_chan *chan;
4303
4304 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4305 if (!chan)
4306 goto drop;
4307
4308 sk = chan->sk;
4309
4310 lock_sock(sk);
4311
4312 BT_DBG("sk %p, len %d", sk, skb->len);
4313
4314 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4315 goto drop;
4316
4317 if (chan->imtu < skb->len)
4318 goto drop;
4319
4320 if (!chan->ops->recv(chan->data, skb))
4321 goto done;
4322
4323 drop:
4324 kfree_skb(skb);
4325
4326 done:
4327 if (sk)
4328 release_sock(sk);
4329 return 0;
4330 }
4331
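/* Receive path for the LE data (ATT) fixed channel: same state and
 * receive MTU checks as the connectionless channel above, but the
 * channel is looked up by its fixed CID rather than by PSM.
 */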
4332 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4333 {
4334 struct sock *sk = NULL;
4335 struct l2cap_chan *chan;
4336
4337 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4338 if (!chan)
4339 goto drop;
4340
4341 sk = chan->sk;
4342
4343 lock_sock(sk);
4344
4345 BT_DBG("sk %p, len %d", sk, skb->len);
4346
4347 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4348 goto drop;
4349
4350 if (chan->imtu < skb->len)
4351 goto drop;
4352
4353 if (!chan->ops->recv(chan->data, skb))
4354 goto done;
4355
4356 drop:
4357 kfree_skb(skb);
4358
4359 done:
4360 if (sk)
4361 release_sock(sk);
4362 return 0;
4363 }
4364
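/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, verify that the advertised length matches the payload and
 * dispatch on the CID - signalling, connectionless, LE data (ATT),
 * SMP, or an ordinary data channel.
 */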
4365 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4366 {
4367 struct l2cap_hdr *lh = (void *) skb->data;
4368 u16 cid, len;
4369 __le16 psm;
4370
4371 skb_pull(skb, L2CAP_HDR_SIZE);
4372 cid = __le16_to_cpu(lh->cid);
4373 len = __le16_to_cpu(lh->len);
4374
4375 if (len != skb->len) {
4376 kfree_skb(skb);
4377 return;
4378 }
4379
4380 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4381
4382 switch (cid) {
4383 case L2CAP_CID_LE_SIGNALING:
4384 case L2CAP_CID_SIGNALING:
4385 l2cap_sig_channel(conn, skb);
4386 break;
4387
4388 case L2CAP_CID_CONN_LESS:
4389 psm = get_unaligned_le16(skb->data);
4390 skb_pull(skb, 2);
4391 l2cap_conless_channel(conn, psm, skb);
4392 break;
4393
4394 case L2CAP_CID_LE_DATA:
4395 l2cap_att_channel(conn, cid, skb);
4396 break;
4397
4398 case L2CAP_CID_SMP:
4399 if (smp_sig_channel(conn, skb))
4400 l2cap_conn_del(conn->hcon, EACCES);
4401 break;
4402
4403 default:
4404 l2cap_data_channel(conn, cid, skb);
4405 break;
4406 }
4407 }
4408
4409 /* ---- L2CAP interface with lower layer (HCI) ---- */
4410
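/* Incoming connection indication from the HCI layer: scan the listening
 * channels for one bound to this adapter (or to BDADDR_ANY) and report
 * whether the link should be accepted and whether a role switch to
 * master is wanted. An exact local-address match takes precedence over
 * a wildcard one.
 */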
4411 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4412 {
4413 int exact = 0, lm1 = 0, lm2 = 0;
4414 struct l2cap_chan *c;
4415
4416 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4417
4418 /* Find listening sockets and check their link_mode */
4419 read_lock(&chan_list_lock);
4420 list_for_each_entry(c, &chan_list, global_l) {
4421 struct sock *sk = c->sk;
4422
4423 if (c->state != BT_LISTEN)
4424 continue;
4425
4426 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4427 lm1 |= HCI_LM_ACCEPT;
4428 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4429 lm1 |= HCI_LM_MASTER;
4430 exact++;
4431 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4432 lm2 |= HCI_LM_ACCEPT;
4433 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4434 lm2 |= HCI_LM_MASTER;
4435 }
4436 }
4437 read_unlock(&chan_list_lock);
4438
4439 return exact ? lm1 : lm2;
4440 }
4441
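/* Connection-complete callback from HCI: on success set up the
 * l2cap_conn for this link and mark it ready; on failure tear the
 * connection down with the translated HCI error.
 */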
4442 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4443 {
4444 struct l2cap_conn *conn;
4445
4446 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4447
4448 if (!status) {
4449 conn = l2cap_conn_add(hcon, status);
4450 if (conn)
4451 l2cap_conn_ready(conn);
4452 } else
4453 l2cap_conn_del(hcon, bt_to_errno(status));
4454
4455 return 0;
4456 }
4457
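/* Disconnection callbacks from HCI: l2cap_disconn_ind() supplies the
 * reason code to use for the disconnect (defaulting to remote user
 * termination if no l2cap_conn exists), and l2cap_disconn_cfm() tears
 * the connection down once the link is gone.
 */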
4458 int l2cap_disconn_ind(struct hci_conn *hcon)
4459 {
4460 struct l2cap_conn *conn = hcon->l2cap_data;
4461
4462 BT_DBG("hcon %p", hcon);
4463
4464 if (!conn)
4465 return HCI_ERROR_REMOTE_USER_TERM;
4466 return conn->disc_reason;
4467 }
4468
4469 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4470 {
4471 BT_DBG("hcon %p reason %d", hcon, reason);
4472
4473 l2cap_conn_del(hcon, bt_to_errno(reason));
4474 return 0;
4475 }
4476
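/* React to an encryption change on a connection-oriented channel: when
 * encryption is lost, a medium-security channel gets its timer rearmed
 * with the encryption timeout while a high-security channel is closed
 * outright; when encryption comes back, a pending timer on a
 * medium-security channel is cleared.
 */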
4477 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4478 {
4479 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4480 return;
4481
4482 if (encrypt == 0x00) {
4483 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4484 __clear_chan_timer(chan);
4485 __set_chan_timer(chan,
4486 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4487 } else if (chan->sec_level == BT_SECURITY_HIGH)
4488 l2cap_chan_close(chan, ECONNREFUSED);
4489 } else {
4490 if (chan->sec_level == BT_SECURITY_MEDIUM)
4491 __clear_chan_timer(chan);
4492 }
4493 }
4494
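/* Security (authentication/encryption) result callback from HCI: walk
 * every channel on the connection and advance its state machine - mark
 * LE channels ready, re-check encryption on established channels, send
 * the Connection Request that was waiting on the security procedure, or
 * answer a deferred incoming connection with either success or a
 * security block.
 */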
4495 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4496 {
4497 struct l2cap_conn *conn = hcon->l2cap_data;
4498 struct l2cap_chan *chan;
4499
4500 if (!conn)
4501 return 0;
4502
4503 BT_DBG("conn %p", conn);
4504
4505 if (hcon->type == LE_LINK) {
4506 smp_distribute_keys(conn, 0);
4507 __cancel_delayed_work(&conn->security_timer);
4508 }
4509
4510 rcu_read_lock();
4511
4512 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4513 struct sock *sk = chan->sk;
4514
4515 bh_lock_sock(sk);
4516
4517 		BT_DBG("chan->scid 0x%4.4x", chan->scid);
4518
4519 if (chan->scid == L2CAP_CID_LE_DATA) {
4520 if (!status && encrypt) {
4521 chan->sec_level = hcon->sec_level;
4522 l2cap_chan_ready(sk);
4523 }
4524
4525 bh_unlock_sock(sk);
4526 continue;
4527 }
4528
4529 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4530 bh_unlock_sock(sk);
4531 continue;
4532 }
4533
4534 if (!status && (chan->state == BT_CONNECTED ||
4535 chan->state == BT_CONFIG)) {
4536 l2cap_check_encryption(chan, encrypt);
4537 bh_unlock_sock(sk);
4538 continue;
4539 }
4540
4541 if (chan->state == BT_CONNECT) {
4542 if (!status) {
4543 struct l2cap_conn_req req;
4544 req.scid = cpu_to_le16(chan->scid);
4545 req.psm = chan->psm;
4546
4547 chan->ident = l2cap_get_ident(conn);
4548 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4549
4550 l2cap_send_cmd(conn, chan->ident,
4551 L2CAP_CONN_REQ, sizeof(req), &req);
4552 } else {
4553 __clear_chan_timer(chan);
4554 __set_chan_timer(chan,
4555 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4556 }
4557 } else if (chan->state == BT_CONNECT2) {
4558 struct l2cap_conn_rsp rsp;
4559 __u16 res, stat;
4560
4561 if (!status) {
4562 if (bt_sk(sk)->defer_setup) {
4563 struct sock *parent = bt_sk(sk)->parent;
4564 res = L2CAP_CR_PEND;
4565 stat = L2CAP_CS_AUTHOR_PEND;
4566 if (parent)
4567 parent->sk_data_ready(parent, 0);
4568 } else {
4569 l2cap_state_change(chan, BT_CONFIG);
4570 res = L2CAP_CR_SUCCESS;
4571 stat = L2CAP_CS_NO_INFO;
4572 }
4573 } else {
4574 l2cap_state_change(chan, BT_DISCONN);
4575 __set_chan_timer(chan,
4576 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4577 res = L2CAP_CR_SEC_BLOCK;
4578 stat = L2CAP_CS_NO_INFO;
4579 }
4580
4581 rsp.scid = cpu_to_le16(chan->dcid);
4582 rsp.dcid = cpu_to_le16(chan->scid);
4583 rsp.result = cpu_to_le16(res);
4584 rsp.status = cpu_to_le16(stat);
4585 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4586 sizeof(rsp), &rsp);
4587 }
4588
4589 bh_unlock_sock(sk);
4590 }
4591
4592 rcu_read_unlock();
4593
4594 return 0;
4595 }
4596
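/* Entry point for ACL data from the HCI layer. An L2CAP frame may be
 * split across several ACL packets: a start fragment carries the basic
 * header with the total length, continuation fragments are appended to
 * conn->rx_skb until rx_len drops to zero and the reassembled frame is
 * handed to l2cap_recv_frame(). Malformed fragment sequences mark the
 * connection unreliable and drop the offending skb.
 */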
4597 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4598 {
4599 struct l2cap_conn *conn = hcon->l2cap_data;
4600
4601 if (!conn)
4602 conn = l2cap_conn_add(hcon, 0);
4603
4604 if (!conn)
4605 goto drop;
4606
4607 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4608
4609 if (!(flags & ACL_CONT)) {
4610 struct l2cap_hdr *hdr;
4611 struct l2cap_chan *chan;
4612 u16 cid;
4613 int len;
4614
4615 if (conn->rx_len) {
4616 BT_ERR("Unexpected start frame (len %d)", skb->len);
4617 kfree_skb(conn->rx_skb);
4618 conn->rx_skb = NULL;
4619 conn->rx_len = 0;
4620 l2cap_conn_unreliable(conn, ECOMM);
4621 }
4622
4623 		/* A start fragment always begins with the Basic L2CAP header */
4624 if (skb->len < L2CAP_HDR_SIZE) {
4625 BT_ERR("Frame is too short (len %d)", skb->len);
4626 l2cap_conn_unreliable(conn, ECOMM);
4627 goto drop;
4628 }
4629
4630 hdr = (struct l2cap_hdr *) skb->data;
4631 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4632 cid = __le16_to_cpu(hdr->cid);
4633
4634 if (len == skb->len) {
4635 /* Complete frame received */
4636 l2cap_recv_frame(conn, skb);
4637 return 0;
4638 }
4639
4640 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4641
4642 if (skb->len > len) {
4643 BT_ERR("Frame is too long (len %d, expected len %d)",
4644 skb->len, len);
4645 l2cap_conn_unreliable(conn, ECOMM);
4646 goto drop;
4647 }
4648
4649 chan = l2cap_get_chan_by_scid(conn, cid);
4650
4651 if (chan && chan->sk) {
4652 struct sock *sk = chan->sk;
4653
4654 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4655 BT_ERR("Frame exceeding recv MTU (len %d, "
4656 "MTU %d)", len,
4657 chan->imtu);
4658 release_sock(sk);
4659 l2cap_conn_unreliable(conn, ECOMM);
4660 goto drop;
4661 }
4662 release_sock(sk);
4663 }
4664
4665 /* Allocate skb for the complete frame (with header) */
4666 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4667 if (!conn->rx_skb)
4668 goto drop;
4669
4670 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4671 skb->len);
4672 conn->rx_len = len - skb->len;
4673 } else {
4674 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4675
4676 if (!conn->rx_len) {
4677 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4678 l2cap_conn_unreliable(conn, ECOMM);
4679 goto drop;
4680 }
4681
4682 if (skb->len > conn->rx_len) {
4683 BT_ERR("Fragment is too long (len %d, expected %d)",
4684 skb->len, conn->rx_len);
4685 kfree_skb(conn->rx_skb);
4686 conn->rx_skb = NULL;
4687 conn->rx_len = 0;
4688 l2cap_conn_unreliable(conn, ECOMM);
4689 goto drop;
4690 }
4691
4692 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4693 skb->len);
4694 conn->rx_len -= skb->len;
4695
4696 if (!conn->rx_len) {
4697 /* Complete frame received */
4698 l2cap_recv_frame(conn, conn->rx_skb);
4699 conn->rx_skb = NULL;
4700 }
4701 }
4702
4703 drop:
4704 kfree_skb(skb);
4705 return 0;
4706 }
4707
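/* debugfs dump of the global channel list, one channel per line:
 * src dst state psm scid dcid imtu omtu sec_level mode.
 * With illustrative values only, a line might look like:
 *   00:11:22:33:44:55 00:AA:BB:CC:DD:EE 1 1 0x0040 0x0041 672 672 1 0
 */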
4708 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4709 {
4710 struct l2cap_chan *c;
4711
4712 read_lock(&chan_list_lock);
4713
4714 list_for_each_entry(c, &chan_list, global_l) {
4715 struct sock *sk = c->sk;
4716
4717 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4718 batostr(&bt_sk(sk)->src),
4719 batostr(&bt_sk(sk)->dst),
4720 c->state, __le16_to_cpu(c->psm),
4721 c->scid, c->dcid, c->imtu, c->omtu,
4722 c->sec_level, c->mode);
4723 }
4724
4725 read_unlock(&chan_list_lock);
4726
4727 return 0;
4728 }
4729
4730 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4731 {
4732 return single_open(file, l2cap_debugfs_show, inode->i_private);
4733 }
4734
4735 static const struct file_operations l2cap_debugfs_fops = {
4736 .open = l2cap_debugfs_open,
4737 .read = seq_read,
4738 .llseek = seq_lseek,
4739 .release = single_release,
4740 };
4741
4742 static struct dentry *l2cap_debugfs;
4743
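/* Register the L2CAP socket layer and, if the Bluetooth debugfs root
 * exists, expose the channel list as a read-only "l2cap" file under it
 * (typically /sys/kernel/debug/bluetooth/l2cap).
 */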
4744 int __init l2cap_init(void)
4745 {
4746 int err;
4747
4748 err = l2cap_init_sockets();
4749 if (err < 0)
4750 return err;
4751
4752 if (bt_debugfs) {
4753 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4754 bt_debugfs, NULL, &l2cap_debugfs_fops);
4755 if (!l2cap_debugfs)
4756 BT_ERR("Failed to create L2CAP debug file");
4757 }
4758
4759 return 0;
4760 }
4761
4762 void l2cap_exit(void)
4763 {
4764 debugfs_remove(l2cap_debugfs);
4765 l2cap_cleanup_sockets();
4766 }
4767
4768 module_param(disable_ertm, bool, 0644);
4769 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
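
/* Assuming this file is linked into bluetooth.ko (as it is in kernels
 * of this vintage), ERTM support can be disabled at module load time,
 * e.g.:
 *   modprobe bluetooth disable_ertm=1
 * or toggled later through
 *   /sys/module/bluetooth/parameters/disable_ertm
 * (the 0644 permission above makes the parameter writable by root).
 */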