]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/l2cap_core.c
Bluetooth: move l2cap_sock_bind()/listen() to l2cap_sock.c
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 #define VERSION "2.15"
59
60 int disable_ertm;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65 static struct workqueue_struct *_busy_wq;
66
67 struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 };
70
71 static void l2cap_busy_work(struct work_struct *work);
72
73 static void l2cap_sock_close(struct sock *sk);
74
75 static int l2cap_build_conf_req(struct sock *sk, void *data);
76 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
77 u8 code, u8 ident, u16 dlen, void *data);
78
79 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80
81 /* ---- L2CAP timers ---- */
/* Arm the socket timer to expire @timeout jiffies from now,
 * replacing any previously scheduled expiry. */
void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
87
/* Cancel any pending socket timer. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
93
94 /* ---- L2CAP channels ---- */
95 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
96 {
97 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
101 }
102 return s;
103 }
104
105 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
106 {
107 struct sock *s;
108 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
109 if (l2cap_pi(s)->scid == cid)
110 break;
111 }
112 return s;
113 }
114
/* Find channel with given SCID.
 * Returns locked socket.  The socket is bh-locked while the list
 * read lock is still held, so the channel cannot be unlinked between
 * lookup and lock acquisition. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
127
128 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
129 {
130 struct sock *s;
131 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
132 if (l2cap_pi(s)->ident == ident)
133 break;
134 }
135 return s;
136 }
137
/* Find the channel whose pending request uses @ident.
 * Returns the socket bh-locked (taken under the list read lock), or
 * NULL if no channel matches. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
148
149 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
150 {
151 u16 cid = L2CAP_CID_DYN_START;
152
153 for (; cid < L2CAP_CID_DYN_END; cid++) {
154 if (!__l2cap_get_chan_by_scid(l, cid))
155 return cid;
156 }
157
158 return 0;
159 }
160
/* Insert the channel at the head of the connection's channel list.
 * Takes a reference on the socket (dropped in l2cap_chan_unlink).
 * Caller must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
172
/* Remove the channel from the connection's channel list and drop the
 * reference taken by __l2cap_chan_link.  Takes the list write lock
 * itself, so callers must NOT already hold it. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
189
/* Attach a channel to a connection: assign CIDs according to the
 * socket type, link it into the connection's channel list and, for
 * incoming channels, queue it on the listening parent.
 * Caller must hold the channel list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason; 0x13 appears to be "remote user
	 * terminated" -- NOTE(review): confirm against HCI error codes. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
221
/* Delete channel.
 * Must be called on the locked socket.
 * Unlinks the channel from its connection (releasing the hci_conn
 * reference), marks the socket closed and zapped, notifies the owner
 * or the listening parent, and frees all queued transmit/ERTM state.
 * A non-zero @err is reported through sk->sk_err. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted child: detach from the listener and
		 * wake the accept()ing process. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers and release retransmission and
		 * selective-reject bookkeeping. */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
270
/* Map the socket type, PSM and requested security level to the HCI
 * authentication requirement used when establishing the ACL link.
 * Note: for the SDP PSM (0x0001) this also downgrades a LOW security
 * request to BT_SECURITY_SDP as a side effect. */
static inline u8 l2cap_get_auth_type(struct sock *sk)
{
	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets are used for dedicated bonding. */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never requires bonding. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
301
/* Service level security */
/* Check (and if necessary initiate) the link security required by
 * this channel.  Returns non-zero when the link already satisfies the
 * requirement; see hci_conn_security() for the exact semantics. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(sk);

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
313
/* Allocate the next signalling command identifier for this connection.
 * Identifiers wrap within 1..128 under conn->lock so concurrent
 * requests never share an ident. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
335
336 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
337 {
338 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
339 u8 flags;
340
341 BT_DBG("code 0x%2.2x", code);
342
343 if (!skb)
344 return;
345
346 if (lmp_no_flush_capable(conn->hcon->hdev))
347 flags = ACL_START_NO_FLUSH;
348 else
349 flags = ACL_START;
350
351 hci_send_acl(conn->hcon, skb, flags);
352 }
353
/* Build and send one ERTM supervisory frame (S-frame) carrying
 * @control.  Consumes any pending F-bit/P-bit request from
 * pi->conn_state.  Only valid on a connected channel. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	/* S-frames are header + 2 byte control field ... */
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	/* ... plus 2 bytes FCS when CRC16 checking is negotiated. */
	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending final bit, if any. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Piggy-back a pending poll bit, if any. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers everything before the FCS field itself. */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
405
/* Send a Receiver-Ready, or Receiver-Not-Ready when the local side is
 * busy, acknowledging everything up to pi->buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* Remember that RNR was sent so it can be cleared later. */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
418
/* True when no L2CAP connect request is currently outstanding for
 * this channel. */
static inline int __l2cap_no_conn_pending(struct sock *sk)
{
	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
423
/* Advance channel setup on an established ACL link: send the connect
 * request once the remote feature mask is known and security allows,
 * or kick off the information request exchange first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight: wait for it to finish;
		 * l2cap_conn_start() will retry all channels then. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* First channel on this link: query the remote feature
		 * mask, guarded by a timeout. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
457
458 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
459 {
460 u32 local_feat_mask = l2cap_feat_mask;
461 if (!disable_ertm)
462 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
463
464 switch (mode) {
465 case L2CAP_MODE_ERTM:
466 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
467 case L2CAP_MODE_STREAMING:
468 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
469 default:
470 return 0x00;
471 }
472 }
473
/* Send a disconnect request for the channel, dropping all pending
 * transmit state and moving the socket to BT_DISCONN.  @err is stored
 * in sk->sk_err for the eventual wakeup. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM timers must not fire once disconnect is under way. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
497
498 /* ---- L2CAP connections ---- */
499 static void l2cap_conn_start(struct l2cap_conn *conn)
500 {
501 struct l2cap_chan_list *l = &conn->chan_list;
502 struct sock_del_list del, *tmp1, *tmp2;
503 struct sock *sk;
504
505 BT_DBG("conn %p", conn);
506
507 INIT_LIST_HEAD(&del.list);
508
509 read_lock(&l->lock);
510
511 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
512 bh_lock_sock(sk);
513
514 if (sk->sk_type != SOCK_SEQPACKET &&
515 sk->sk_type != SOCK_STREAM) {
516 bh_unlock_sock(sk);
517 continue;
518 }
519
520 if (sk->sk_state == BT_CONNECT) {
521 struct l2cap_conn_req req;
522
523 if (!l2cap_check_security(sk) ||
524 !__l2cap_no_conn_pending(sk)) {
525 bh_unlock_sock(sk);
526 continue;
527 }
528
529 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
530 conn->feat_mask)
531 && l2cap_pi(sk)->conf_state &
532 L2CAP_CONF_STATE2_DEVICE) {
533 tmp1 = kzalloc(sizeof(struct sock_del_list),
534 GFP_ATOMIC);
535 tmp1->sk = sk;
536 list_add_tail(&tmp1->list, &del.list);
537 bh_unlock_sock(sk);
538 continue;
539 }
540
541 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
542 req.psm = l2cap_pi(sk)->psm;
543
544 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
545 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
546
547 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
548 L2CAP_CONN_REQ, sizeof(req), &req);
549
550 } else if (sk->sk_state == BT_CONNECT2) {
551 struct l2cap_conn_rsp rsp;
552 char buf[128];
553 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
554 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
555
556 if (l2cap_check_security(sk)) {
557 if (bt_sk(sk)->defer_setup) {
558 struct sock *parent = bt_sk(sk)->parent;
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
561 parent->sk_data_ready(parent, 0);
562
563 } else {
564 sk->sk_state = BT_CONFIG;
565 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
566 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
567 }
568 } else {
569 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
570 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 }
572
573 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
574 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
575
576 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
577 rsp.result != L2CAP_CR_SUCCESS) {
578 bh_unlock_sock(sk);
579 continue;
580 }
581
582 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
583 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
584 l2cap_build_conf_req(sk, buf), buf);
585 l2cap_pi(sk)->num_conf_req++;
586 }
587
588 bh_unlock_sock(sk);
589 }
590
591 read_unlock(&l->lock);
592
593 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
594 bh_lock_sock(tmp1->sk);
595 __l2cap_sock_close(tmp1->sk, ECONNRESET);
596 bh_unlock_sock(tmp1->sk);
597 list_del(&tmp1->list);
598 kfree(tmp1);
599 }
600 }
601
/* The underlying ACL link just came up: mark connectionless/raw
 * channels connected immediately and start setup for channels that
 * were waiting in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No L2CAP-level handshake needed. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
627
/* Notify sockets that we cannot guaranty reliability anymore */
/* Report @err on every channel that asked for reliable delivery
 * (force_reliable); other channels are left untouched. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
645
/* Information request timer expired: give up on the feature exchange,
 * mark it done and let all waiting channels proceed anyway. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
655
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Returns NULL on allocation failure, or the existing object
 * unchanged when one is already attached.  A non-zero @status also
 * short-circuits to the (possibly NULL) existing object. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason; 0x13 appears to be "remote user
	 * terminated" -- NOTE(review): confirm against HCI error codes. */
	conn->disc_reason = 0x13;

	return conn;
}
688
/* Tear down the L2CAP connection attached to @hcon: delete and kill
 * every channel (propagating @err), stop the info timer and free the
 * connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
715
/* Locked wrapper around __l2cap_chan_add(): attach @sk to @conn under
 * the channel list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
723
724 /* ---- Socket interface ---- */
725
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, else a
 * BDADDR_ANY wildcard listener, else NULL.  @state of 0 matches any
 * socket state. */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL iff the loop broke early on an exact match;
	 * otherwise fall back to the wildcard candidate. */
	return node ? sk : sk1;
}
755
/* Close every connection that was queued on a listening socket but
 * never accepted, then mark the listener itself closed/zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
769
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Drops the socket from the global list and releases the final
 * reference; a no-op while the socket is still attached to a struct
 * socket or not yet zapped. */
void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
785
/* State-dependent close: initiate the protocol-level disconnect that
 * matches the socket's current state.  Caller holds the socket lock;
 * actual destruction happens later via l2cap_sock_kill(). */
void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Graceful disconnect, bounded by sndtimeo. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			/* Incoming connect still pending: reject it. */
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			sk->sk_state = BT_DISCONN;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
840
/* Must be called on unlocked socket. */
/* Full close path: stop the timer, run the state-dependent close
 * under the socket lock, then destroy the socket if it is orphaned. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
850
/* Establish (or reuse) the ACL link to the destination and attach the
 * channel to it.  On success the socket is moved to BT_CONNECT (or
 * straight to BT_CONNECTED for raw/dgram sockets on an already-up
 * link).  Returns 0 or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	auth_type = l2cap_get_auth_type(sk);

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		/* Drop the reference taken by hci_connect(). */
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: raw/dgram channels are immediately
		 * connected, others start the L2CAP handshake now. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
910
/* connect() handler: validate the sockaddr, PSM and channel mode,
 * start the connection and (unless non-blocking) wait until the
 * socket reaches BT_CONNECTED. */
int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy only what userspace actually supplied. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
			sk->sk_type != SOCK_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
995
/* accept() handler: block (up to the receive timeout) until a fully
 * established child connection can be dequeued from the listening
 * socket, then hand it to @newsock. */
int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the child can be
		 * queued, then revalidate the listener afterwards. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1051
1052 int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1053 {
1054 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1055 struct sock *sk = sock->sk;
1056
1057 BT_DBG("sock %p, sk %p", sock, sk);
1058
1059 addr->sa_family = AF_BLUETOOTH;
1060 *len = sizeof(struct sockaddr_l2);
1061
1062 if (peer) {
1063 la->l2_psm = l2cap_pi(sk)->psm;
1064 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1065 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1066 } else {
1067 la->l2_psm = l2cap_pi(sk)->sport;
1068 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1069 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1070 }
1071
1072 return 0;
1073 }
1074
/* Block (interruptibly) until every outstanding ERTM frame has been
 * acknowledged or the channel loses its connection.  Called with the
 * socket locked; the lock is dropped while sleeping.  Returns 0 or a
 * negative errno (signal or socket error). */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval when it runs out; this wait
		 * is bounded only by signals or channel teardown. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1105
/* ERTM monitor timer: the peer has not responded to our poll.  Give
 * up and disconnect once remote_max_tx retries are exhausted,
 * otherwise poll again. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1125
/* ERTM retransmission timer: an I-frame went unacknowledged.  Enter
 * the WAIT_F state and poll the peer; the monitor timer takes over
 * retry counting from here. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1141
/* Free transmitted I-frames from the head of the TX queue up to (but
 * not including) expected_ack_seq, and stop the retransmission timer
 * once nothing is left unacknowledged. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First frame still awaiting acknowledgement: done. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1160
1161 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1162 {
1163 struct l2cap_pinfo *pi = l2cap_pi(sk);
1164 struct hci_conn *hcon = pi->conn->hcon;
1165 u16 flags;
1166
1167 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1168
1169 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1170 flags = ACL_START_NO_FLUSH;
1171 else
1172 flags = ACL_START;
1173
1174 hci_send_acl(hcon, skb, flags);
1175 }
1176
/* Streaming mode transmit: drain the whole TX queue, stamping each
 * frame with the next TxSeq (modulo 64) and an FCS when negotiated.
 * No frames are retained for retransmission. */
static void l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
		/* Patch the sequence number into the control field that
		 * was reserved when the frame was built. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(sk, skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
	}
}
1198
1199 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1200 {
1201 struct l2cap_pinfo *pi = l2cap_pi(sk);
1202 struct sk_buff *skb, *tx_skb;
1203 u16 control, fcs;
1204
1205 skb = skb_peek(TX_QUEUE(sk));
1206 if (!skb)
1207 return;
1208
1209 do {
1210 if (bt_cb(skb)->tx_seq == tx_seq)
1211 break;
1212
1213 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1214 return;
1215
1216 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1217
1218 if (pi->remote_max_tx &&
1219 bt_cb(skb)->retries == pi->remote_max_tx) {
1220 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1221 return;
1222 }
1223
1224 tx_skb = skb_clone(skb, GFP_ATOMIC);
1225 bt_cb(skb)->retries++;
1226 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1227
1228 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1229 control |= L2CAP_CTRL_FINAL;
1230 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1231 }
1232
1233 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1234 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1235
1236 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1237
1238 if (pi->fcs == L2CAP_FCS_CRC16) {
1239 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1240 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1241 }
1242
1243 l2cap_do_send(sk, tx_skb);
1244 }
1245
1246 static int l2cap_ertm_send(struct sock *sk)
1247 {
1248 struct sk_buff *skb, *tx_skb;
1249 struct l2cap_pinfo *pi = l2cap_pi(sk);
1250 u16 control, fcs;
1251 int nsent = 0;
1252
1253 if (sk->sk_state != BT_CONNECTED)
1254 return -ENOTCONN;
1255
1256 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1257
1258 if (pi->remote_max_tx &&
1259 bt_cb(skb)->retries == pi->remote_max_tx) {
1260 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1261 break;
1262 }
1263
1264 tx_skb = skb_clone(skb, GFP_ATOMIC);
1265
1266 bt_cb(skb)->retries++;
1267
1268 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1269 control &= L2CAP_CTRL_SAR;
1270
1271 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1272 control |= L2CAP_CTRL_FINAL;
1273 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1274 }
1275 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1276 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1277 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1278
1279
1280 if (pi->fcs == L2CAP_FCS_CRC16) {
1281 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1282 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1283 }
1284
1285 l2cap_do_send(sk, tx_skb);
1286
1287 __mod_retrans_timer();
1288
1289 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1290 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1291
1292 pi->unacked_frames++;
1293 pi->frames_sent++;
1294
1295 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1296 sk->sk_send_head = NULL;
1297 else
1298 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1299
1300 nsent++;
1301 }
1302
1303 return nsent;
1304 }
1305
1306 static int l2cap_retransmit_frames(struct sock *sk)
1307 {
1308 struct l2cap_pinfo *pi = l2cap_pi(sk);
1309 int ret;
1310
1311 if (!skb_queue_empty(TX_QUEUE(sk)))
1312 sk->sk_send_head = TX_QUEUE(sk)->next;
1313
1314 pi->next_tx_seq = pi->expected_ack_seq;
1315 ret = l2cap_ertm_send(sk);
1316 return ret;
1317 }
1318
1319 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1320 {
1321 struct sock *sk = (struct sock *)pi;
1322 u16 control = 0;
1323
1324 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1325
1326 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1327 control |= L2CAP_SUPER_RCV_NOT_READY;
1328 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1329 l2cap_send_sframe(pi, control);
1330 return;
1331 }
1332
1333 if (l2cap_ertm_send(sk) > 0)
1334 return;
1335
1336 control |= L2CAP_SUPER_RCV_READY;
1337 l2cap_send_sframe(pi, control);
1338 }
1339
1340 static void l2cap_send_srejtail(struct sock *sk)
1341 {
1342 struct srej_list *tail;
1343 u16 control;
1344
1345 control = L2CAP_SUPER_SELECT_REJECT;
1346 control |= L2CAP_CTRL_FINAL;
1347
1348 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1349 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1350
1351 l2cap_send_sframe(l2cap_pi(sk), control);
1352 }
1353
/* Copy @count bytes of user data from @msg into @skb, then pull the
 * remaining @len - @count bytes into a chain of continuation fragments
 * hung off skb_shinfo(skb)->frag_list, each at most conn->mtu bytes and
 * without an L2CAP header.  Returns the number of bytes consumed, or a
 * negative errno (-EFAULT on bad user memory, or the error reported by
 * bt_skb_send_alloc()).  On failure the caller frees @skb, which also
 * releases any fragments already linked into it.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1385
1386 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1387 {
1388 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1389 struct sk_buff *skb;
1390 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1391 struct l2cap_hdr *lh;
1392
1393 BT_DBG("sk %p len %d", sk, (int)len);
1394
1395 count = min_t(unsigned int, (conn->mtu - hlen), len);
1396 skb = bt_skb_send_alloc(sk, count + hlen,
1397 msg->msg_flags & MSG_DONTWAIT, &err);
1398 if (!skb)
1399 return ERR_PTR(err);
1400
1401 /* Create L2CAP header */
1402 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1403 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1404 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1405 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1406
1407 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1408 if (unlikely(err < 0)) {
1409 kfree_skb(skb);
1410 return ERR_PTR(err);
1411 }
1412 return skb;
1413 }
1414
1415 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1416 {
1417 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1418 struct sk_buff *skb;
1419 int err, count, hlen = L2CAP_HDR_SIZE;
1420 struct l2cap_hdr *lh;
1421
1422 BT_DBG("sk %p len %d", sk, (int)len);
1423
1424 count = min_t(unsigned int, (conn->mtu - hlen), len);
1425 skb = bt_skb_send_alloc(sk, count + hlen,
1426 msg->msg_flags & MSG_DONTWAIT, &err);
1427 if (!skb)
1428 return ERR_PTR(err);
1429
1430 /* Create L2CAP header */
1431 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1432 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1433 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1434
1435 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1436 if (unlikely(err < 0)) {
1437 kfree_skb(skb);
1438 return ERR_PTR(err);
1439 }
1440 return skb;
1441 }
1442
/* Build a single ERTM/streaming I-frame PDU from user data.  Layout:
 * basic L2CAP header, 16-bit control field, optional 16-bit SDU length
 * (@sdulen != 0 only on the first segment of a segmented SDU) and an
 * optional 16-bit FCS placeholder.  Payload beyond the first allocation
 * is chained as continuation fragments by l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Extra header room for the SDU length field, if present */
	if (sdulen)
		hlen += 2;

	/* ... and for the trailing FCS when CRC16 is negotiated */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Reserve space for the FCS; the real value is computed at send
	 * time, once the sequence numbers are stamped in. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1487
/* Segment an SDU larger than the remote MPS into a Start / Continue /
 * End sequence of I-frames and append them all to the transmit queue.
 * Only the Start frame carries the total SDU length.  Returns the
 * number of bytes queued or a negative errno; on error nothing is
 * queued (frames built so far are purged).
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start frame: remote_mps payload bytes plus the SDU length field */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	/* Splice the whole SDU onto the transmit queue atomically */
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1533
/* sendmsg() entry point for L2CAP sockets.  Dispatches on socket type
 * and channel mode: SOCK_DGRAM sends a connectionless PDU immediately;
 * basic mode sends one PDU per call (bounded by the outgoing MTU);
 * ERTM/streaming mode segments the SDU if needed, queues the frames and
 * kicks the transmit machinery.  Returns the number of bytes accepted
 * or a negative errno.
 */
int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
		/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* While in WAIT_F with the peer busy, keep the
			 * frames queued; they go out once the F-bit
			 * exchange completes. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		/* Frames were queued successfully, report full length */
		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
1635
/* recvmsg() entry point for L2CAP sockets.  With deferred setup, the
 * first read on a BT_CONNECT2 socket completes the connection: it sends
 * the pending connect response (and the first configure request, if not
 * already sent) and returns 0 without delivering data.  Otherwise the
 * call is forwarded to the generic Bluetooth recvmsg helpers.
 */
int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Configure request already on the wire, nothing else to do */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1677
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM).
 * Kept for compatibility with old userspace; new code uses the
 * SOL_BLUETOOTH options handled in l2cap_sock_setsockopt().
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Channel parameters cannot change once connected */
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Pre-fill with current values so a short copy from
		 * userspace leaves the remaining fields unchanged. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		/* Reject ERTM/streaming if disabled by module parameter.
		 * NOTE(review): on an invalid mode err is set to -EINVAL
		 * but the remaining option fields below are still applied
		 * -- longstanding behavior userspace may rely on. */
		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map link-mode flags to the highest requested level */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1762
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY, BT_DEFER_SETUP and BT_FLUSHABLE.
 */
int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* Security level only makes sense on connection-oriented
		 * or raw sockets */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Deferred setup can only be toggled before the socket
		 * starts accepting connections */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	case BT_FLUSHABLE:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt > BT_FLUSHABLE_ON) {
			err = -EINVAL;
			break;
		}

		if (opt == BT_FLUSHABLE_OFF) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			/* proceed futher only when we have l2cap_conn and
			   No Flush support in the LM */
			if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
				err = -EINVAL;
				break;
			}
		}

		l2cap_pi(sk)->flushable = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1851
1852 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1853 {
1854 struct sock *sk = sock->sk;
1855 struct l2cap_options opts;
1856 struct l2cap_conninfo cinfo;
1857 int len, err = 0;
1858 u32 opt;
1859
1860 BT_DBG("sk %p", sk);
1861
1862 if (get_user(len, optlen))
1863 return -EFAULT;
1864
1865 lock_sock(sk);
1866
1867 switch (optname) {
1868 case L2CAP_OPTIONS:
1869 opts.imtu = l2cap_pi(sk)->imtu;
1870 opts.omtu = l2cap_pi(sk)->omtu;
1871 opts.flush_to = l2cap_pi(sk)->flush_to;
1872 opts.mode = l2cap_pi(sk)->mode;
1873 opts.fcs = l2cap_pi(sk)->fcs;
1874 opts.max_tx = l2cap_pi(sk)->max_tx;
1875 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1876
1877 len = min_t(unsigned int, len, sizeof(opts));
1878 if (copy_to_user(optval, (char *) &opts, len))
1879 err = -EFAULT;
1880
1881 break;
1882
1883 case L2CAP_LM:
1884 switch (l2cap_pi(sk)->sec_level) {
1885 case BT_SECURITY_LOW:
1886 opt = L2CAP_LM_AUTH;
1887 break;
1888 case BT_SECURITY_MEDIUM:
1889 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1890 break;
1891 case BT_SECURITY_HIGH:
1892 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1893 L2CAP_LM_SECURE;
1894 break;
1895 default:
1896 opt = 0;
1897 break;
1898 }
1899
1900 if (l2cap_pi(sk)->role_switch)
1901 opt |= L2CAP_LM_MASTER;
1902
1903 if (l2cap_pi(sk)->force_reliable)
1904 opt |= L2CAP_LM_RELIABLE;
1905
1906 if (put_user(opt, (u32 __user *) optval))
1907 err = -EFAULT;
1908 break;
1909
1910 case L2CAP_CONNINFO:
1911 if (sk->sk_state != BT_CONNECTED &&
1912 !(sk->sk_state == BT_CONNECT2 &&
1913 bt_sk(sk)->defer_setup)) {
1914 err = -ENOTCONN;
1915 break;
1916 }
1917
1918 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1919 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1920
1921 len = min_t(unsigned int, len, sizeof(cinfo));
1922 if (copy_to_user(optval, (char *) &cinfo, len))
1923 err = -EFAULT;
1924
1925 break;
1926
1927 default:
1928 err = -ENOPROTOOPT;
1929 break;
1930 }
1931
1932 release_sock(sk);
1933 return err;
1934 }
1935
1936 int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1937 {
1938 struct sock *sk = sock->sk;
1939 struct bt_security sec;
1940 int len, err = 0;
1941
1942 BT_DBG("sk %p", sk);
1943
1944 if (level == SOL_L2CAP)
1945 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1946
1947 if (level != SOL_BLUETOOTH)
1948 return -ENOPROTOOPT;
1949
1950 if (get_user(len, optlen))
1951 return -EFAULT;
1952
1953 lock_sock(sk);
1954
1955 switch (optname) {
1956 case BT_SECURITY:
1957 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1958 && sk->sk_type != SOCK_RAW) {
1959 err = -EINVAL;
1960 break;
1961 }
1962
1963 sec.level = l2cap_pi(sk)->sec_level;
1964
1965 len = min_t(unsigned int, len, sizeof(sec));
1966 if (copy_to_user(optval, (char *) &sec, len))
1967 err = -EFAULT;
1968
1969 break;
1970
1971 case BT_DEFER_SETUP:
1972 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1973 err = -EINVAL;
1974 break;
1975 }
1976
1977 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1978 err = -EFAULT;
1979
1980 break;
1981
1982 case BT_FLUSHABLE:
1983 if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
1984 err = -EFAULT;
1985
1986 break;
1987
1988 default:
1989 err = -ENOPROTOOPT;
1990 break;
1991 }
1992
1993 release_sock(sk);
1994 return err;
1995 }
1996
/* shutdown() entry point.  For ERTM channels, first waits until all
 * outstanding I-frames are acknowledged, then tears the channel down;
 * honors SO_LINGER by waiting for BT_CLOSED up to sk_lingertime.
 * Returns 0 or a negative errno (including a pending socket error).
 */
int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Drain unacked ERTM frames before closing */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2027
2028 static void l2cap_chan_ready(struct sock *sk)
2029 {
2030 struct sock *parent = bt_sk(sk)->parent;
2031
2032 BT_DBG("sk %p, parent %p", sk, parent);
2033
2034 l2cap_pi(sk)->conf_state = 0;
2035 l2cap_sock_clear_timer(sk);
2036
2037 if (!parent) {
2038 /* Outgoing channel.
2039 * Wake up socket sleeping on connect.
2040 */
2041 sk->sk_state = BT_CONNECTED;
2042 sk->sk_state_change(sk);
2043 } else {
2044 /* Incoming channel.
2045 * Wake up socket sleeping on accept.
2046 */
2047 parent->sk_data_ready(parent, 0);
2048 }
2049 }
2050
2051 /* Copy frame to all raw sockets on that connection */
2052 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2053 {
2054 struct l2cap_chan_list *l = &conn->chan_list;
2055 struct sk_buff *nskb;
2056 struct sock *sk;
2057
2058 BT_DBG("conn %p", conn);
2059
2060 read_lock(&l->lock);
2061 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2062 if (sk->sk_type != SOCK_RAW)
2063 continue;
2064
2065 /* Don't send frame to the socket it came from */
2066 if (skb->sk == sk)
2067 continue;
2068 nskb = skb_clone(skb, GFP_ATOMIC);
2069 if (!nskb)
2070 continue;
2071
2072 if (sock_queue_rcv_skb(sk, nskb))
2073 kfree_skb(nskb);
2074 }
2075 read_unlock(&l->lock);
2076 }
2077
2078 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID 0x0001),
 * command header, then @dlen bytes of @data.  Payload that does not fit
 * in the first conn->mtu-sized buffer is chained as headerless
 * continuation fragments on frag_list.  Returns the skb or NULL on
 * allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment carries the headers, so less room for data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also releases the fragment chain */
	kfree_skb(skb);
	return NULL;
}
2137
/* Decode one configuration option at *@ptr, advancing *@ptr past it.
 * Option type and length are returned through @type/@olen; the value is
 * returned through @val, widened for 1/2/4-byte options or as a pointer
 * into the buffer for anything else.  Returns the total bytes consumed.
 * NOTE(review): opt->len is taken from the wire without validation
 * against the remaining buffer length -- callers bound the loop by the
 * declared payload length only; confirm peers cannot overrun it.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2170
2171 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2172 {
2173 struct l2cap_conf_opt *opt = *ptr;
2174
2175 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2176
2177 opt->type = type;
2178 opt->len = len;
2179
2180 switch (len) {
2181 case 1:
2182 *((u8 *) opt->val) = val;
2183 break;
2184
2185 case 2:
2186 put_unaligned_le16(val, opt->val);
2187 break;
2188
2189 case 4:
2190 put_unaligned_le32(val, opt->val);
2191 break;
2192
2193 default:
2194 memcpy(opt->val, (void *) val, len);
2195 break;
2196 }
2197
2198 *ptr += L2CAP_CONF_OPT_SIZE + len;
2199 }
2200
/* ERTM acknowledgement timer callback: runs in softirq context, so the
 * socket is taken with bh_lock_sock() before sending the pending ack.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2209
2210 static inline void l2cap_ertm_init(struct sock *sk)
2211 {
2212 l2cap_pi(sk)->expected_ack_seq = 0;
2213 l2cap_pi(sk)->unacked_frames = 0;
2214 l2cap_pi(sk)->buffer_seq = 0;
2215 l2cap_pi(sk)->num_acked = 0;
2216 l2cap_pi(sk)->frames_sent = 0;
2217
2218 setup_timer(&l2cap_pi(sk)->retrans_timer,
2219 l2cap_retrans_timeout, (unsigned long) sk);
2220 setup_timer(&l2cap_pi(sk)->monitor_timer,
2221 l2cap_monitor_timeout, (unsigned long) sk);
2222 setup_timer(&l2cap_pi(sk)->ack_timer,
2223 l2cap_ack_timeout, (unsigned long) sk);
2224
2225 __skb_queue_head_init(SREJ_QUEUE(sk));
2226 __skb_queue_head_init(BUSY_QUEUE(sk));
2227
2228 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2229
2230 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2231 }
2232
2233 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2234 {
2235 switch (mode) {
2236 case L2CAP_MODE_STREAMING:
2237 case L2CAP_MODE_ERTM:
2238 if (l2cap_mode_supported(mode, remote_feat_mask))
2239 return mode;
2240 /* fall through */
2241 default:
2242 return L2CAP_MODE_BASIC;
2243 }
2244 }
2245
/* Build an outgoing configure request for the channel into @data.
 * On the first request (no previous req/rsp exchanged), downgrades the
 * requested mode to one the remote supports.  Emits MTU, RFC and FCS
 * options as appropriate for the selected mode.  Returns the number of
 * bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode was already negotiated on an earlier round */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was pinned by userspace; don't renegotiate */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC option when the
		 * remote understands ERTM or streaming */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a full frame fits the ACL MTU
		 * (minus header/control/FCS overhead) */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2347
2348 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2349 {
2350 struct l2cap_pinfo *pi = l2cap_pi(sk);
2351 struct l2cap_conf_rsp *rsp = data;
2352 void *ptr = rsp->data;
2353 void *req = pi->conf_req;
2354 int len = pi->conf_len;
2355 int type, hint, olen;
2356 unsigned long val;
2357 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2358 u16 mtu = L2CAP_DEFAULT_MTU;
2359 u16 result = L2CAP_CONF_SUCCESS;
2360
2361 BT_DBG("sk %p", sk);
2362
2363 while (len >= L2CAP_CONF_OPT_SIZE) {
2364 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2365
2366 hint = type & L2CAP_CONF_HINT;
2367 type &= L2CAP_CONF_MASK;
2368
2369 switch (type) {
2370 case L2CAP_CONF_MTU:
2371 mtu = val;
2372 break;
2373
2374 case L2CAP_CONF_FLUSH_TO:
2375 pi->flush_to = val;
2376 break;
2377
2378 case L2CAP_CONF_QOS:
2379 break;
2380
2381 case L2CAP_CONF_RFC:
2382 if (olen == sizeof(rfc))
2383 memcpy(&rfc, (void *) val, olen);
2384 break;
2385
2386 case L2CAP_CONF_FCS:
2387 if (val == L2CAP_FCS_NONE)
2388 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2389
2390 break;
2391
2392 default:
2393 if (hint)
2394 break;
2395
2396 result = L2CAP_CONF_UNKNOWN;
2397 *((u8 *) ptr++) = type;
2398 break;
2399 }
2400 }
2401
2402 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2403 goto done;
2404
2405 switch (pi->mode) {
2406 case L2CAP_MODE_STREAMING:
2407 case L2CAP_MODE_ERTM:
2408 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2409 pi->mode = l2cap_select_mode(rfc.mode,
2410 pi->conn->feat_mask);
2411 break;
2412 }
2413
2414 if (pi->mode != rfc.mode)
2415 return -ECONNREFUSED;
2416
2417 break;
2418 }
2419
2420 done:
2421 if (pi->mode != rfc.mode) {
2422 result = L2CAP_CONF_UNACCEPT;
2423 rfc.mode = pi->mode;
2424
2425 if (pi->num_conf_rsp == 1)
2426 return -ECONNREFUSED;
2427
2428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2429 sizeof(rfc), (unsigned long) &rfc);
2430 }
2431
2432
2433 if (result == L2CAP_CONF_SUCCESS) {
2434 /* Configure output options and let the other side know
2435 * which ones we don't like. */
2436
2437 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2438 result = L2CAP_CONF_UNACCEPT;
2439 else {
2440 pi->omtu = mtu;
2441 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2442 }
2443 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2444
2445 switch (rfc.mode) {
2446 case L2CAP_MODE_BASIC:
2447 pi->fcs = L2CAP_FCS_NONE;
2448 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2449 break;
2450
2451 case L2CAP_MODE_ERTM:
2452 pi->remote_tx_win = rfc.txwin_size;
2453 pi->remote_max_tx = rfc.max_transmit;
2454
2455 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2456 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2457
2458 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2459
2460 rfc.retrans_timeout =
2461 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2462 rfc.monitor_timeout =
2463 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2464
2465 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2466
2467 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2468 sizeof(rfc), (unsigned long) &rfc);
2469
2470 break;
2471
2472 case L2CAP_MODE_STREAMING:
2473 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2474 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2475
2476 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2477
2478 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2479
2480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2481 sizeof(rfc), (unsigned long) &rfc);
2482
2483 break;
2484
2485 default:
2486 result = L2CAP_CONF_UNACCEPT;
2487
2488 memset(&rfc, 0, sizeof(rfc));
2489 rfc.mode = pi->mode;
2490 }
2491
2492 if (result == L2CAP_CONF_SUCCESS)
2493 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2494 }
2495 rsp->scid = cpu_to_le16(pi->dcid);
2496 rsp->result = cpu_to_le16(result);
2497 rsp->flags = cpu_to_le16(0x0000);
2498
2499 return ptr - data;
2500 }
2501
2502 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2503 {
2504 struct l2cap_pinfo *pi = l2cap_pi(sk);
2505 struct l2cap_conf_req *req = data;
2506 void *ptr = req->data;
2507 int type, olen;
2508 unsigned long val;
2509 struct l2cap_conf_rfc rfc;
2510
2511 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2512
2513 while (len >= L2CAP_CONF_OPT_SIZE) {
2514 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2515
2516 switch (type) {
2517 case L2CAP_CONF_MTU:
2518 if (val < L2CAP_DEFAULT_MIN_MTU) {
2519 *result = L2CAP_CONF_UNACCEPT;
2520 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2521 } else
2522 pi->imtu = val;
2523 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2524 break;
2525
2526 case L2CAP_CONF_FLUSH_TO:
2527 pi->flush_to = val;
2528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2529 2, pi->flush_to);
2530 break;
2531
2532 case L2CAP_CONF_RFC:
2533 if (olen == sizeof(rfc))
2534 memcpy(&rfc, (void *)val, olen);
2535
2536 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2537 rfc.mode != pi->mode)
2538 return -ECONNREFUSED;
2539
2540 pi->fcs = 0;
2541
2542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2543 sizeof(rfc), (unsigned long) &rfc);
2544 break;
2545 }
2546 }
2547
2548 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2549 return -ECONNREFUSED;
2550
2551 pi->mode = rfc.mode;
2552
2553 if (*result == L2CAP_CONF_SUCCESS) {
2554 switch (rfc.mode) {
2555 case L2CAP_MODE_ERTM:
2556 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2557 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2558 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2559 break;
2560 case L2CAP_MODE_STREAMING:
2561 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2562 }
2563 }
2564
2565 req->dcid = cpu_to_le16(pi->dcid);
2566 req->flags = cpu_to_le16(0x0000);
2567
2568 return ptr - data;
2569 }
2570
2571 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2572 {
2573 struct l2cap_conf_rsp *rsp = data;
2574 void *ptr = rsp->data;
2575
2576 BT_DBG("sk %p", sk);
2577
2578 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2579 rsp->result = cpu_to_le16(result);
2580 rsp->flags = cpu_to_le16(flags);
2581
2582 return ptr - data;
2583 }
2584
2585 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2586 {
2587 struct l2cap_pinfo *pi = l2cap_pi(sk);
2588 int type, olen;
2589 unsigned long val;
2590 struct l2cap_conf_rfc rfc;
2591
2592 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2593
2594 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2595 return;
2596
2597 while (len >= L2CAP_CONF_OPT_SIZE) {
2598 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2599
2600 switch (type) {
2601 case L2CAP_CONF_RFC:
2602 if (olen == sizeof(rfc))
2603 memcpy(&rfc, (void *)val, olen);
2604 goto done;
2605 }
2606 }
2607
2608 done:
2609 switch (rfc.mode) {
2610 case L2CAP_MODE_ERTM:
2611 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2612 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2613 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2614 break;
2615 case L2CAP_MODE_STREAMING:
2616 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2617 }
2618 }
2619
2620 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2621 {
2622 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2623
2624 if (rej->reason != 0x0000)
2625 return 0;
2626
2627 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2628 cmd->ident == conn->info_ident) {
2629 del_timer(&conn->info_timer);
2630
2631 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2632 conn->info_ident = 0;
2633
2634 l2cap_conn_start(conn);
2635 }
2636
2637 return 0;
2638 }
2639
/* Handle an incoming L2CAP Connection Request.
 *
 * Looks up a listening socket for the requested PSM, allocates a child
 * socket, links it into the connection's channel list, and always sends
 * a Connection Response (success, pending, or an error result).  Runs
 * in BH context: the parent socket is protected by bh_lock_sock() and
 * the channel list by write_lock_bh().
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05: disconnect reason reported to userspace —
		 * presumably HCI "authentication failure"; confirm. */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Child inherits settings from the listening parent. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	/* Until the remote feature mask is known, only a "pending"
	 * response may be sent (info exchange must finish first). */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* First connect on this link: trigger the feature-mask query. */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away. */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
2767
/* Handle a Connection Response to one of our Connection Requests.
 *
 * NOTE(review): l2cap_get_chan_by_scid()/_by_ident() apparently return
 * with the socket bh-locked — the bh_unlock_sock() at the end pairs
 * with a lock taken inside those helpers.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A non-zero scid identifies the channel directly; otherwise fall
	 * back to matching by the signaling identifier we sent. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Channel is open: move on to the configuration phase. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer teardown via a short timer instead. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2829
2830 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2831 {
2832 /* FCS is enabled only in ERTM or streaming mode, if one or both
2833 * sides request it.
2834 */
2835 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2836 pi->fcs = L2CAP_FCS_NONE;
2837 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2838 pi->fcs = L2CAP_FCS_CRC16;
2839 }
2840
/* Handle a Configuration Request.
 *
 * Option payloads may be split across several requests (continuation
 * flag 0x0001); fragments are accumulated in pi->conf_req until the
 * final one arrives, then parsed as a whole and answered.  When both
 * directions are configured the channel becomes BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE: returns with the socket bh-locked on success. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		/* 0x0002: invalid CID in request */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unparseable/unacceptable options: drop the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is ready for data. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered theirs but never sent ours: send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2931
2932 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2933 {
2934 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2935 u16 scid, flags, result;
2936 struct sock *sk;
2937 int len = cmd->len - sizeof(*rsp);
2938
2939 scid = __le16_to_cpu(rsp->scid);
2940 flags = __le16_to_cpu(rsp->flags);
2941 result = __le16_to_cpu(rsp->result);
2942
2943 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2944 scid, flags, result);
2945
2946 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2947 if (!sk)
2948 return 0;
2949
2950 switch (result) {
2951 case L2CAP_CONF_SUCCESS:
2952 l2cap_conf_rfc_get(sk, rsp->data, len);
2953 break;
2954
2955 case L2CAP_CONF_UNACCEPT:
2956 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2957 char req[64];
2958
2959 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2960 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2961 goto done;
2962 }
2963
2964 /* throw out any old stored conf requests */
2965 result = L2CAP_CONF_SUCCESS;
2966 len = l2cap_parse_conf_rsp(sk, rsp->data,
2967 len, req, &result);
2968 if (len < 0) {
2969 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2970 goto done;
2971 }
2972
2973 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2974 L2CAP_CONF_REQ, len, req);
2975 l2cap_pi(sk)->num_conf_req++;
2976 if (result != L2CAP_CONF_SUCCESS)
2977 goto done;
2978 break;
2979 }
2980
2981 default:
2982 sk->sk_err = ECONNRESET;
2983 l2cap_sock_set_timer(sk, HZ * 5);
2984 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2985 goto done;
2986 }
2987
2988 if (flags & 0x01)
2989 goto done;
2990
2991 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2992
2993 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2994 set_default_fcs(l2cap_pi(sk));
2995
2996 sk->sk_state = BT_CONNECTED;
2997 l2cap_pi(sk)->next_tx_seq = 0;
2998 l2cap_pi(sk)->expected_tx_seq = 0;
2999 __skb_queue_head_init(TX_QUEUE(sk));
3000 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3001 l2cap_ertm_init(sk);
3002
3003 l2cap_chan_ready(sk);
3004 }
3005
3006 done:
3007 bh_unlock_sock(sk);
3008 return 0;
3009 }
3010
/* Handle a Disconnection Request: acknowledge it, shut the socket down
 * and tear the channel down — unless the socket is currently owned by
 * userspace, in which case teardown is deferred via a short timer.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid.  NOTE: returns bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3048
/* Handle a Disconnection Response to our Disconnection Request: the
 * channel can now be deleted, unless the socket is owned by userspace,
 * in which case teardown is deferred via a short timer.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* NOTE: returns with the socket bh-locked on success. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* Clean disconnect: no error reported to the socket. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3079
3080 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3081 {
3082 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3083 u16 type;
3084
3085 type = __le16_to_cpu(req->type);
3086
3087 BT_DBG("type 0x%4.4x", type);
3088
3089 if (type == L2CAP_IT_FEAT_MASK) {
3090 u8 buf[8];
3091 u32 feat_mask = l2cap_feat_mask;
3092 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3093 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3094 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3095 if (!disable_ertm)
3096 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3097 | L2CAP_FEAT_FCS;
3098 put_unaligned_le32(feat_mask, rsp->data);
3099 l2cap_send_cmd(conn, cmd->ident,
3100 L2CAP_INFO_RSP, sizeof(buf), buf);
3101 } else if (type == L2CAP_IT_FIXED_CHAN) {
3102 u8 buf[12];
3103 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3104 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3105 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3106 memcpy(buf + 4, l2cap_fixed_chan, 8);
3107 l2cap_send_cmd(conn, cmd->ident,
3108 L2CAP_INFO_RSP, sizeof(buf), buf);
3109 } else {
3110 struct l2cap_info_rsp rsp;
3111 rsp.type = cpu_to_le16(type);
3112 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3113 l2cap_send_cmd(conn, cmd->ident,
3114 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3115 }
3116
3117 return 0;
3118 }
3119
3120 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3121 {
3122 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3123 u16 type, result;
3124
3125 type = __le16_to_cpu(rsp->type);
3126 result = __le16_to_cpu(rsp->result);
3127
3128 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3129
3130 del_timer(&conn->info_timer);
3131
3132 if (result != L2CAP_IR_SUCCESS) {
3133 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3134 conn->info_ident = 0;
3135
3136 l2cap_conn_start(conn);
3137
3138 return 0;
3139 }
3140
3141 if (type == L2CAP_IT_FEAT_MASK) {
3142 conn->feat_mask = get_unaligned_le32(rsp->data);
3143
3144 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3145 struct l2cap_info_req req;
3146 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3147
3148 conn->info_ident = l2cap_get_ident(conn);
3149
3150 l2cap_send_cmd(conn, conn->info_ident,
3151 L2CAP_INFO_REQ, sizeof(req), &req);
3152 } else {
3153 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3154 conn->info_ident = 0;
3155
3156 l2cap_conn_start(conn);
3157 }
3158 } else if (type == L2CAP_IT_FIXED_CHAN) {
3159 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3160 conn->info_ident = 0;
3161
3162 l2cap_conn_start(conn);
3163 }
3164
3165 return 0;
3166 }
3167
/* Process all signaling commands packed into one frame on the L2CAP
 * signaling channel (CID 0x0001).  Each command is dispatched to its
 * handler; any handler error is answered with a Command Reject.
 * Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same frame. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3257
3258 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3259 {
3260 u16 our_fcs, rcv_fcs;
3261 int hdr_size = L2CAP_HDR_SIZE + 2;
3262
3263 if (pi->fcs == L2CAP_FCS_CRC16) {
3264 skb_trim(skb, skb->len - 2);
3265 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3266 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3267
3268 if (our_fcs != rcv_fcs)
3269 return -EBADMSG;
3270 }
3271 return 0;
3272 }
3273
/* After leaving the WAIT_F state, resume transmission: report local
 * busy with an RNR if applicable, retransmit anything the peer missed,
 * push pending I-frames, and fall back to an RR so the peer always gets
 * an acknowledgment of our current receive sequence.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing was sent and we are not busy: ack with a plain RR. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3300
/* Insert an out-of-sequence I-frame into the SREJ reordering queue,
 * keeping the queue sorted by tx_seq distance from buffer_seq (mod-64
 * sequence space).  Returns -EINVAL on a duplicate tx_seq, 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of this frame from the receive window base; wrap
	 * negative C modulo results back into 0..63. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame further from the base than ours:
		 * insert in front of it to keep the queue ordered. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3343
3344 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3345 {
3346 struct l2cap_pinfo *pi = l2cap_pi(sk);
3347 struct sk_buff *_skb;
3348 int err;
3349
3350 switch (control & L2CAP_CTRL_SAR) {
3351 case L2CAP_SDU_UNSEGMENTED:
3352 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3353 goto drop;
3354
3355 err = sock_queue_rcv_skb(sk, skb);
3356 if (!err)
3357 return err;
3358
3359 break;
3360
3361 case L2CAP_SDU_START:
3362 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3363 goto drop;
3364
3365 pi->sdu_len = get_unaligned_le16(skb->data);
3366
3367 if (pi->sdu_len > pi->imtu)
3368 goto disconnect;
3369
3370 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3371 if (!pi->sdu)
3372 return -ENOMEM;
3373
3374 /* pull sdu_len bytes only after alloc, because of Local Busy
3375 * condition we have to be sure that this will be executed
3376 * only once, i.e., when alloc does not fail */
3377 skb_pull(skb, 2);
3378
3379 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3380
3381 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3382 pi->partial_sdu_len = skb->len;
3383 break;
3384
3385 case L2CAP_SDU_CONTINUE:
3386 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3387 goto disconnect;
3388
3389 if (!pi->sdu)
3390 goto disconnect;
3391
3392 pi->partial_sdu_len += skb->len;
3393 if (pi->partial_sdu_len > pi->sdu_len)
3394 goto drop;
3395
3396 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3397
3398 break;
3399
3400 case L2CAP_SDU_END:
3401 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3402 goto disconnect;
3403
3404 if (!pi->sdu)
3405 goto disconnect;
3406
3407 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3408 pi->partial_sdu_len += skb->len;
3409
3410 if (pi->partial_sdu_len > pi->imtu)
3411 goto drop;
3412
3413 if (pi->partial_sdu_len != pi->sdu_len)
3414 goto drop;
3415
3416 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3417 }
3418
3419 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3420 if (!_skb) {
3421 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3422 return -ENOMEM;
3423 }
3424
3425 err = sock_queue_rcv_skb(sk, _skb);
3426 if (err < 0) {
3427 kfree_skb(_skb);
3428 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3429 return err;
3430 }
3431
3432 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3433 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3434
3435 kfree_skb(pi->sdu);
3436 break;
3437 }
3438
3439 kfree_skb(skb);
3440 return 0;
3441
3442 drop:
3443 kfree_skb(pi->sdu);
3444 pi->sdu = NULL;
3445
3446 disconnect:
3447 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3448 kfree_skb(skb);
3449 return 0;
3450 }
3451
/* Try to drain the local-busy backlog queue into the receive path.
 * Returns -EBUSY if the socket is still congested (frame is requeued),
 * 0 once the backlog is empty and the local busy condition is cleared
 * (announced to the peer with an RR+poll if an RNR had been sent).
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for next time. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We previously told the peer "not ready": send RR with the poll
	 * bit and wait for its final response. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3491
/* Workqueue handler for the receive-side local busy condition: keep
 * retrying l2cap_try_push_rx_skb() (sleeping in between) until the
 * backlog drains, a signal/socket error occurs, or the retry budget is
 * exhausted — in which case the channel is disconnected.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* pi is the first member of the socket, so this cast is valid. */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock while sleeping so the receive path can
		 * make progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3538
/* Push a received ERTM I-frame towards the socket.  If the receive
 * queue is congested, enter the local busy state: queue the frame,
 * tell the peer RNR, and hand recovery to the busy workqueue.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	/* Already busy: append to the backlog and retry draining it. */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);


	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending (Receiver Not Ready). */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3577
3578 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3579 {
3580 struct l2cap_pinfo *pi = l2cap_pi(sk);
3581 struct sk_buff *_skb;
3582 int err = -EINVAL;
3583
3584 /*
3585 * TODO: We have to notify the userland if some data is lost with the
3586 * Streaming Mode.
3587 */
3588
3589 switch (control & L2CAP_CTRL_SAR) {
3590 case L2CAP_SDU_UNSEGMENTED:
3591 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3592 kfree_skb(pi->sdu);
3593 break;
3594 }
3595
3596 err = sock_queue_rcv_skb(sk, skb);
3597 if (!err)
3598 return 0;
3599
3600 break;
3601
3602 case L2CAP_SDU_START:
3603 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3604 kfree_skb(pi->sdu);
3605 break;
3606 }
3607
3608 pi->sdu_len = get_unaligned_le16(skb->data);
3609 skb_pull(skb, 2);
3610
3611 if (pi->sdu_len > pi->imtu) {
3612 err = -EMSGSIZE;
3613 break;
3614 }
3615
3616 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3617 if (!pi->sdu) {
3618 err = -ENOMEM;
3619 break;
3620 }
3621
3622 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3623
3624 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3625 pi->partial_sdu_len = skb->len;
3626 err = 0;
3627 break;
3628
3629 case L2CAP_SDU_CONTINUE:
3630 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3631 break;
3632
3633 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3634
3635 pi->partial_sdu_len += skb->len;
3636 if (pi->partial_sdu_len > pi->sdu_len)
3637 kfree_skb(pi->sdu);
3638 else
3639 err = 0;
3640
3641 break;
3642
3643 case L2CAP_SDU_END:
3644 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3645 break;
3646
3647 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3648
3649 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3650 pi->partial_sdu_len += skb->len;
3651
3652 if (pi->partial_sdu_len > pi->imtu)
3653 goto drop;
3654
3655 if (pi->partial_sdu_len == pi->sdu_len) {
3656 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3657 err = sock_queue_rcv_skb(sk, _skb);
3658 if (err < 0)
3659 kfree_skb(_skb);
3660 }
3661 err = 0;
3662
3663 drop:
3664 kfree_skb(pi->sdu);
3665 break;
3666 }
3667
3668 kfree_skb(skb);
3669 return err;
3670 }
3671
3672 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3673 {
3674 struct sk_buff *skb;
3675 u16 control;
3676
3677 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3678 if (bt_cb(skb)->tx_seq != tx_seq)
3679 break;
3680
3681 skb = skb_dequeue(SREJ_QUEUE(sk));
3682 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3683 l2cap_ertm_reassembly_sdu(sk, skb, control);
3684 l2cap_pi(sk)->buffer_seq_srej =
3685 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3686 tx_seq = (tx_seq + 1) % 64;
3687 }
3688 }
3689
3690 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3691 {
3692 struct l2cap_pinfo *pi = l2cap_pi(sk);
3693 struct srej_list *l, *tmp;
3694 u16 control;
3695
3696 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3697 if (l->tx_seq == tx_seq) {
3698 list_del(&l->list);
3699 kfree(l);
3700 return;
3701 }
3702 control = L2CAP_SUPER_SELECT_REJECT;
3703 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3704 l2cap_send_sframe(pi, control);
3705 list_del(&l->list);
3706 list_add_tail(&l->list, SREJ_LIST(sk));
3707 }
3708 }
3709
3710 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3711 {
3712 struct l2cap_pinfo *pi = l2cap_pi(sk);
3713 struct srej_list *new;
3714 u16 control;
3715
3716 while (tx_seq != pi->expected_tx_seq) {
3717 control = L2CAP_SUPER_SELECT_REJECT;
3718 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3719 l2cap_send_sframe(pi, control);
3720
3721 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3722 new->tx_seq = pi->expected_tx_seq;
3723 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3724 list_add_tail(&new->list, SREJ_LIST(sk));
3725 }
3726 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3727 }
3728
3729 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3730 {
3731 struct l2cap_pinfo *pi = l2cap_pi(sk);
3732 u8 tx_seq = __get_txseq(rx_control);
3733 u8 req_seq = __get_reqseq(rx_control);
3734 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3735 int tx_seq_offset, expected_tx_seq_offset;
3736 int num_to_ack = (pi->tx_win/6) + 1;
3737 int err = 0;
3738
3739 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3740 rx_control);
3741
3742 if (L2CAP_CTRL_FINAL & rx_control &&
3743 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3744 del_timer(&pi->monitor_timer);
3745 if (pi->unacked_frames > 0)
3746 __mod_retrans_timer();
3747 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3748 }
3749
3750 pi->expected_ack_seq = req_seq;
3751 l2cap_drop_acked_frames(sk);
3752
3753 if (tx_seq == pi->expected_tx_seq)
3754 goto expected;
3755
3756 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3757 if (tx_seq_offset < 0)
3758 tx_seq_offset += 64;
3759
3760 /* invalid tx_seq */
3761 if (tx_seq_offset >= pi->tx_win) {
3762 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3763 goto drop;
3764 }
3765
3766 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3767 goto drop;
3768
3769 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3770 struct srej_list *first;
3771
3772 first = list_first_entry(SREJ_LIST(sk),
3773 struct srej_list, list);
3774 if (tx_seq == first->tx_seq) {
3775 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3776 l2cap_check_srej_gap(sk, tx_seq);
3777
3778 list_del(&first->list);
3779 kfree(first);
3780
3781 if (list_empty(SREJ_LIST(sk))) {
3782 pi->buffer_seq = pi->buffer_seq_srej;
3783 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3784 l2cap_send_ack(pi);
3785 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3786 }
3787 } else {
3788 struct srej_list *l;
3789
3790 /* duplicated tx_seq */
3791 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3792 goto drop;
3793
3794 list_for_each_entry(l, SREJ_LIST(sk), list) {
3795 if (l->tx_seq == tx_seq) {
3796 l2cap_resend_srejframe(sk, tx_seq);
3797 return 0;
3798 }
3799 }
3800 l2cap_send_srejframe(sk, tx_seq);
3801 }
3802 } else {
3803 expected_tx_seq_offset =
3804 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3805 if (expected_tx_seq_offset < 0)
3806 expected_tx_seq_offset += 64;
3807
3808 /* duplicated tx_seq */
3809 if (tx_seq_offset < expected_tx_seq_offset)
3810 goto drop;
3811
3812 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3813
3814 BT_DBG("sk %p, Enter SREJ", sk);
3815
3816 INIT_LIST_HEAD(SREJ_LIST(sk));
3817 pi->buffer_seq_srej = pi->buffer_seq;
3818
3819 __skb_queue_head_init(SREJ_QUEUE(sk));
3820 __skb_queue_head_init(BUSY_QUEUE(sk));
3821 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3822
3823 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3824
3825 l2cap_send_srejframe(sk, tx_seq);
3826
3827 del_timer(&pi->ack_timer);
3828 }
3829 return 0;
3830
3831 expected:
3832 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3833
3834 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3835 bt_cb(skb)->tx_seq = tx_seq;
3836 bt_cb(skb)->sar = sar;
3837 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3838 return 0;
3839 }
3840
3841 err = l2cap_push_rx_skb(sk, skb, rx_control);
3842 if (err < 0)
3843 return 0;
3844
3845 if (rx_control & L2CAP_CTRL_FINAL) {
3846 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3847 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3848 else
3849 l2cap_retransmit_frames(sk);
3850 }
3851
3852 __mod_ack_timer();
3853
3854 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3855 if (pi->num_acked == num_to_ack - 1)
3856 l2cap_send_ack(pi);
3857
3858 return 0;
3859
3860 drop:
3861 kfree_skb(skb);
3862 return 0;
3863 }
3864
/* Handle a received RR (Receiver Ready) supervisory frame: acknowledge
 * peer-confirmed frames and react to the P/F bits per the ERTM rules. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	/* Release everything the peer acknowledged up to req_seq */
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: our answer must carry the F-bit */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* F-bit answers our poll: retransmit unless a REJ already
		 * triggered the retransmission (REJ_ACT set) */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		/* Plain RR: peer left its busy state (if any); either ack
		 * pending SREJ recovery or resume normal transmission */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
3908
/* Handle a received REJ supervisory frame: the peer asks for a
 * retransmission starting at req_seq. */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames below the rejected sequence number are acknowledged */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* If REJ_ACT is set this F-bit answers a retransmission
		 * already performed; otherwise retransmit now */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we already retransmitted while a poll answer
		 * is still outstanding */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a received SREJ supervisory frame: the peer asks for the
 * retransmission of the single frame with sequence number req_seq. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		/* Remember which frame we retransmitted while our own
		 * poll answer is still pending */
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmission if this F-bit confirms one we
		 * already did for the same sequence number */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3969
/* Handle a received RNR (Receiver Not Ready) supervisory frame: mark
 * the remote side busy and stop retransmitting until it recovers. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Not in SREJ recovery: stop retransmissions and answer
		 * a poll (if any) immediately */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: keep the peer informed of what we still miss */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
3996
/* Dispatch a received ERTM supervisory frame (RR/REJ/SREJ/RNR) to its
 * handler. Always consumes the skb and returns 0. */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* F-bit answers an outstanding poll: stop the monitor timer */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	/* S-frames carry no payload to deliver; done with the skb */
	kfree_skb(skb);
	return 0;
}
4030
/* Validate and dispatch one received ERTM frame: strip the control
 * field, check FCS, payload length and req_seq sanity, then hand the
 * frame to the I-frame or S-frame path. Consumes the skb. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* A start-of-SDU I-frame carries a 2-byte SDU length field */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* FCS trailer, if configured, is not payload either */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: the frame was shorter than its
		 * mandatory header fields */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4100
/* Deliver one L2CAP data frame to the channel identified by cid,
 * dispatching on the channel mode (basic / ERTM / streaming).
 * NOTE(review): l2cap_get_chan_by_scid appears to return the socket
 * locked — the drop/done labels rely on that; bh_unlock_sock is only
 * called when sk is non-NULL. Consumes the skb on all paths. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process immediately unless the socket is owned by a
		 * user context; then defer via the socket backlog */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SDU-start frames carry a 2-byte SDU length field */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode has no S-frames and bounds len by mps */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming mode: simply resync the
		 * expected sequence number on any gap */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4188
4189 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4190 {
4191 struct sock *sk;
4192
4193 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4194 if (!sk)
4195 goto drop;
4196
4197 bh_lock_sock(sk);
4198
4199 BT_DBG("sk %p, len %d", sk, skb->len);
4200
4201 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4202 goto drop;
4203
4204 if (l2cap_pi(sk)->imtu < skb->len)
4205 goto drop;
4206
4207 if (!sock_queue_rcv_skb(sk, skb))
4208 goto done;
4209
4210 drop:
4211 kfree_skb(skb);
4212
4213 done:
4214 if (sk)
4215 bh_unlock_sock(sk);
4216 return 0;
4217 }
4218
4219 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4220 {
4221 struct l2cap_hdr *lh = (void *) skb->data;
4222 u16 cid, len;
4223 __le16 psm;
4224
4225 skb_pull(skb, L2CAP_HDR_SIZE);
4226 cid = __le16_to_cpu(lh->cid);
4227 len = __le16_to_cpu(lh->len);
4228
4229 if (len != skb->len) {
4230 kfree_skb(skb);
4231 return;
4232 }
4233
4234 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4235
4236 switch (cid) {
4237 case L2CAP_CID_SIGNALING:
4238 l2cap_sig_channel(conn, skb);
4239 break;
4240
4241 case L2CAP_CID_CONN_LESS:
4242 psm = get_unaligned_le16(skb->data);
4243 skb_pull(skb, 2);
4244 l2cap_conless_channel(conn, psm, skb);
4245 break;
4246
4247 default:
4248 l2cap_data_channel(conn, cid, skb);
4249 break;
4250 }
4251 }
4252
4253 /* ---- L2CAP interface with lower layer (HCI) ---- */
4254
4255 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4256 {
4257 int exact = 0, lm1 = 0, lm2 = 0;
4258 register struct sock *sk;
4259 struct hlist_node *node;
4260
4261 if (type != ACL_LINK)
4262 return -EINVAL;
4263
4264 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4265
4266 /* Find listening sockets and check their link_mode */
4267 read_lock(&l2cap_sk_list.lock);
4268 sk_for_each(sk, node, &l2cap_sk_list.head) {
4269 if (sk->sk_state != BT_LISTEN)
4270 continue;
4271
4272 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4273 lm1 |= HCI_LM_ACCEPT;
4274 if (l2cap_pi(sk)->role_switch)
4275 lm1 |= HCI_LM_MASTER;
4276 exact++;
4277 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4278 lm2 |= HCI_LM_ACCEPT;
4279 if (l2cap_pi(sk)->role_switch)
4280 lm2 |= HCI_LM_MASTER;
4281 }
4282 }
4283 read_unlock(&l2cap_sk_list.lock);
4284
4285 return exact ? lm1 : lm2;
4286 }
4287
4288 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4289 {
4290 struct l2cap_conn *conn;
4291
4292 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4293
4294 if (hcon->type != ACL_LINK)
4295 return -EINVAL;
4296
4297 if (!status) {
4298 conn = l2cap_conn_add(hcon, status);
4299 if (conn)
4300 l2cap_conn_ready(conn);
4301 } else
4302 l2cap_conn_del(hcon, bt_err(status));
4303
4304 return 0;
4305 }
4306
/* Disconnect indication callback: report the reason code to use for
 * the HCI disconnect. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	/* No L2CAP state for this link: fall back to 0x13
	 * (presumably "remote user terminated connection" — verify
	 * against the HCI error code table) */
	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4318
/* Disconnect complete callback: tear down the L2CAP connection and
 * propagate the HCI reason to the affected sockets. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4330
4331 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4332 {
4333 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4334 return;
4335
4336 if (encrypt == 0x00) {
4337 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4338 l2cap_sock_clear_timer(sk);
4339 l2cap_sock_set_timer(sk, HZ * 5);
4340 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4341 __l2cap_sock_close(sk, ECONNREFUSED);
4342 } else {
4343 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4344 l2cap_sock_clear_timer(sk);
4345 }
4346 }
4347
/* Security (authentication/encryption) change callback: walk every
 * channel on the connection and advance or fail its state machine
 * according to the reported status. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect request for this channel is already on the
		 * wire; nothing more to do here */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption check */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel waiting on security: send the
			 * deferred connect request, or fail it quickly */
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel waiting on security: answer the
			 * pending connect request with success or a
			 * security block */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4421
/* Receive one ACL data packet from HCI and reassemble fragmented
 * L2CAP frames: a start fragment allocates conn->rx_skb sized for the
 * whole frame, continuation fragments are appended until rx_len hits
 * zero, then the complete frame is handed to l2cap_recv_frame(). */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A start fragment while reassembly is still in progress
		 * means we lost fragments: reset and flag the loss */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Reject frames that would exceed the channel's incoming
		 * MTU before buffering them.
		 * NOTE(review): l2cap_get_chan_by_scid appears to return
		 * the socket locked — hence the bh_unlock_sock calls. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment without a start fragment */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4529
/* debugfs seq_file callback: print one line per L2CAP socket with its
 * addresses, state, PSM, channel ids, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4552
/* debugfs open callback: hook l2cap_debugfs_show into seq_file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4557
/* File operations for the "l2cap" debugfs entry (seq_file based). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal at module exit. */
static struct dentry *l2cap_debugfs;
4566
/* HCI protocol registration: callbacks through which the HCI core
 * feeds ACL events and data into L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4577
4578 static int __init l2cap_init(void)
4579 {
4580 int err;
4581
4582 err = l2cap_init_sockets();
4583 if (err < 0)
4584 return err;
4585
4586 _busy_wq = create_singlethread_workqueue("l2cap");
4587 if (!_busy_wq) {
4588 err = -ENOMEM;
4589 goto error;
4590 }
4591
4592 err = hci_register_proto(&l2cap_hci_proto);
4593 if (err < 0) {
4594 BT_ERR("L2CAP protocol registration failed");
4595 bt_sock_unregister(BTPROTO_L2CAP);
4596 goto error;
4597 }
4598
4599 if (bt_debugfs) {
4600 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4601 bt_debugfs, NULL, &l2cap_debugfs_fops);
4602 if (!l2cap_debugfs)
4603 BT_ERR("Failed to create L2CAP debug file");
4604 }
4605
4606 BT_INFO("L2CAP ver %s", VERSION);
4607 BT_INFO("L2CAP socket layer initialized");
4608
4609 return 0;
4610
4611 error:
4612 destroy_workqueue(_busy_wq);
4613 l2cap_cleanup_sockets();
4614 return err;
4615 }
4616
/* Module exit: undo l2cap_init in reverse order — debugfs entry,
 * workqueue (flushed first so no work is lost), HCI hook, sockets. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4629
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4637
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Module parameter: lets ERTM support be switched off at load time */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");