/*
 * net/bluetooth/l2cap.c
 *
 * Source: mirror_ubuntu-zesty-kernel.git (git.proxmox.com mirror),
 * at commit "Bluetooth: Move set of P-bit to l2cap_send_sframe()".
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
56 #define VERSION "2.14"
57
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
60
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
63
64 static const struct proto_ops l2cap_sock_ops;
65
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
68 };
69
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
73
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
76
77 /* ---- L2CAP timers ---- */
/* Socket timer expiry: close the channel with an error code reflecting
 * how far setup had progressed. Runs in timer (softirq) context, hence
 * bh_lock_sock(). */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Timed out while connected/configuring, or while connecting with
	 * real (non-SDP) security: report as refused, not as a timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket; it frees the
	 * socket only if it is zapped and orphaned. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
102
/* (Re)arm the socket timer; @timeout is relative, in jiffies. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
108
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
114
115 /* ---- L2CAP channels ---- */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
117 {
118 struct sock *s;
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
121 break;
122 }
123 return s;
124 }
125
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
127 {
128 struct sock *s;
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
131 break;
132 }
133 return s;
134 }
135
/* Find channel with given SCID.
 * Returns locked socket (bh_lock_sock); caller must bh_unlock_sock(). */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
148
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
150 {
151 struct sock *s;
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
154 break;
155 }
156 return s;
157 }
158
/* Find channel by signalling command identifier.
 * Returns locked socket (bh_lock_sock); caller must bh_unlock_sock(). */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
169
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
171 {
172 u16 cid = L2CAP_CID_DYN_START;
173
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
176 return cid;
177 }
178
179 return 0;
180 }
181
/* Push the socket onto the head of the connection's channel list,
 * taking a reference. Caller holds the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
193
/* Remove the socket from the connection's channel list and drop the
 * reference taken by __l2cap_chan_link(). Takes the list lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
210
/* Attach a channel socket to a connection: assign CIDs appropriate to
 * the socket type and link it into the channel list. Caller holds the
 * chan_list write lock (see l2cap_chan_add()). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason reported to peers; presumably the
	 * HCI "remote user terminated connection" code — TODO confirm. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	/* Incoming channel: queue on the listening parent for accept(). */
	if (parent)
		bt_accept_enqueue(parent, sk);
}
242
/* Delete channel.
 * Must be called on the locked socket.
 * Detaches the channel from its connection (if any), marks the socket
 * closed/zapped, records @err in sk_err, and notifies either the
 * listening parent or the socket's own state-change callback. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Drop the ACL link reference taken in l2cap_do_connect()
		 * or on incoming connection setup. */
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: remove from parent's accept queue and
		 * wake the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
273
/* Service level security */
/* Map the channel's requested security level onto an HCI authentication
 * type and ask the ACL link to enforce it. Returns the result of
 * hci_conn_security() (non-zero when security is already satisfied). */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	/* PSM 0x0001 is SDP: never require bonding for it. */
	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
305
/* Allocate the next signalling command identifier for this connection.
 * Serialized with conn->lock; never returns 0 (invalid ident). */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	/* Wrap 128 -> 1, staying inside the kernel range. */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
327
/* Build a signalling command PDU and push it down the ACL link.
 * Returns -ENOMEM when the skb cannot be allocated, otherwise the
 * result of hci_send_acl(). */
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return -ENOMEM;

	return hci_send_acl(conn->hcon, skb, 0);
}
339
/* Build and transmit an ERTM supervisory frame (S-frame) carrying
 * @control. Consumes any pending F-bit / P-bit request from conn_state
 * and appends a CRC16 FCS when the channel negotiated one. */
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	/* S-frame = basic header + 2-byte control field (+2 for FCS). */
	int count, hlen = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	/* NOTE(review): if conn->mtu < hlen the frame is truncated and the
	 * FCS below is computed over count - 2 — confirm the ACL MTU can
	 * never be smaller than a minimal S-frame. */
	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* One-shot F-bit: set it and clear the request. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* One-shot P-bit: set it and clear the request. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control, not the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	return hci_send_acl(pi->conn->hcon, skb, 0);
}
381
382 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
383 {
384 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
385 control |= L2CAP_SUPER_RCV_NOT_READY;
386 else
387 control |= L2CAP_SUPER_RCV_READY;
388
389 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
390
391 return l2cap_send_sframe(pi, control);
392 }
393
/* Kick off channel establishment: send a Connection Request if the
 * remote feature mask is already known, otherwise first issue an
 * Information Request (the connect is retried from l2cap_conn_start()
 * once the features arrive or the info timer fires). */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: wait for it. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response. */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
426
427 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
428 {
429 struct l2cap_disconn_req req;
430
431 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
432 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
433 l2cap_send_cmd(conn, l2cap_get_ident(conn),
434 L2CAP_DISCONN_REQ, sizeof(req), &req);
435 }
436
437 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its setup state:
 * outgoing channels in BT_CONNECT get a Connection Request (once
 * security allows), incoming channels in BT_CONNECT2 get their pending
 * Connection Response. Called once the feature exchange completes. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented channels take part in setup. */
		if (sk->sk_type != SOCK_SEQPACKET) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept() first:
					 * answer "pending" and wake the
					 * listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security procedure still running. */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
497
/* ACL link is up: mark raw/connectionless sockets connected immediately
 * and start L2CAP-level setup for connection-oriented channels. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			/* No further handshake needed for these types. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
522
/* Notify sockets that we cannot guaranty reliability anymore */
/* Only channels that asked for force_reliable see the error; others
 * silently tolerate the loss. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
540
/* Information Request timed out: give up on the feature exchange and
 * proceed with channel setup using an empty feature view. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
550
/* Get-or-create the L2CAP connection object for an ACL link.
 * Returns the existing conn if one is attached, NULL on a failed link
 * (@status != 0) or allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Same default disconnect reason as in __l2cap_chan_add(). */
	conn->disc_reason = 0x13;

	return conn;
}
583
/* Tear down the L2CAP connection object attached to an ACL link:
 * delete every channel (propagating @err), stop the info timer and
 * free the connection. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled incoming frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* Timer was only armed after the feature request went out. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
610
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
618
619 /* ---- Socket interface ---- */
620 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
621 {
622 struct sock *sk;
623 struct hlist_node *node;
624 sk_for_each(sk, node, &l2cap_sk_list.head)
625 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
626 goto found;
627 sk = NULL;
628 found:
629 return sk;
630 }
631
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state". */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match;
	 * otherwise fall back to the wildcard (BDADDR_ANY) candidate. */
	return node ? sk : sk1;
}
656
/* Find socket with given address (psm, src).
 * Returns locked socket (bh_lock_sock); caller must bh_unlock_sock(). */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
669
/* Socket destructor: free any skbs still sitting on the queues. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
677
/* Shut down a listening socket: close every not-yet-accepted child,
 * then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
691
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Drops the final reference, triggering l2cap_sock_destruct().
 */
static void l2cap_sock_kill(struct sock *sk)
{
	/* Still attached to a struct socket or not yet zapped: leave it. */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
707
/* State-dependent close. Caller holds the socket lock.
 * Established connection-oriented channels start a graceful disconnect
 * (Disconnection Request plus timer); pending incoming connections are
 * rejected; everything else is torn down immediately. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Graceful: wait (bounded by sk_sndtimeo) for the
			 * peer's Disconnection Response. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Incoming connection never completed: tell the
			 * peer why we are refusing it. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
760
/* Must be called on unlocked socket. */
/* Full close path: cancel timers, run the state-dependent close under
 * the socket lock, then free the socket if it became zapped+orphan. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
770
/* Initialize a freshly allocated channel socket. Children of a
 * listening socket inherit its settings; standalone sockets get the
 * defaults. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->mode = L2CAP_MODE_BASIC;
		pi->fcs  = L2CAP_FCS_CRC16;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
805
/* Protocol descriptor: sizes sk_alloc() for struct l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
811
/* Allocate and minimally initialize an L2CAP socket (state BT_OPEN),
 * link it into the global socket list and arm its timer callback.
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* Default connect/disconnect timeout. */
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
836
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP: validate the
 * socket type, check CAP_NET_RAW for raw sockets, then allocate and
 * initialize the sock. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	/* Raw sockets carry signalling traffic: privileged only (unless
	 * created from inside the kernel). */
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
862
/* bind(2): attach a source address and PSM to an open socket.
 * Rejects CID binds, enforces CAP_NET_BIND_SERVICE for reserved PSMs
 * (< 0x1001) and refuses an (addr, psm) pair already in use. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter (legacy) sockaddrs leave the
	 * tail zeroed by the memset. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) are exempt from the
		 * default security requirements. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
916
/* Establish (or join) the ACL link to the destination and attach this
 * channel to it. On an already-up link, channel setup starts
 * immediately; otherwise it continues from l2cap_conn_ready().
 * Caller holds the socket lock. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Map socket type + PSM + security level to an HCI auth type
	 * (mirrors the mapping in l2cap_check_security(), plus the
	 * dedicated-bonding cases for raw sockets). */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* PSM 0x0001 = SDP: no bonding required. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			/* Raw/dgram sockets are done once the link is up. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1006
/* connect(2): validate the target address and channel mode, record the
 * destination, start the connect and (unless non-blocking) wait for
 * BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Tolerate short sockaddrs; the tail stays zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming modes only when the module option enables them. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1082
/* listen(2): require a bound SOCK_SEQPACKET socket in a supported mode.
 * When no PSM was bound, auto-assign the first free odd PSM in the
 * dynamic range 0x1001-0x10ff. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	/* Same mode gating as connect(): ERTM/streaming need enable_ertm. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid PSMs are odd; step by 2 through the dynamic range. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1140
/* accept(2): sleep (interruptibly, bounded by the socket's rcvtimeo)
 * until a child connection can be dequeued from the listening socket. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested locking class: the child's lock may be taken while the
	 * listener's is held. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the softirq path can
		 * queue new connections. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The socket may have been closed while we slept. */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1196
/* getsockname(2)/getpeername(2): report the remote (peer) or local
 * address, PSM and CID of the channel. */
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	addr->sa_family = AF_BLUETOOTH;
	*len = sizeof(struct sockaddr_l2);

	if (peer) {
		la->l2_psm = l2cap_pi(sk)->psm;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	} else {
		/* Local side reports the bound source PSM (sport). */
		la->l2_psm = l2cap_pi(sk)->sport;
		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
	}

	return 0;
}
1219
/* ERTM monitor timer: the peer has not answered our poll. Re-poll with
 * an RR/RNR (P-bit) until remote_max_tx retries are exhausted, then
 * give up and disconnect the channel. Timer (softirq) context. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;
	u16 control;

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
	bh_unlock_sock(sk);
}
1239
/* ERTM retransmission timer: no acknowledgement arrived in time.
 * Switch to the monitor timer, enter the WAIT_F state and poll the
 * peer with an RR/RNR carrying the P-bit. Timer (softirq) context. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;
	u16 control;

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Expect an F-bit in the peer's response to our poll. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
	bh_unlock_sock(sk);
}
1255
1256 static void l2cap_drop_acked_frames(struct sock *sk)
1257 {
1258 struct sk_buff *skb;
1259
1260 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1261 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1262 break;
1263
1264 skb = skb_dequeue(TX_QUEUE(sk));
1265 kfree_skb(skb);
1266
1267 l2cap_pi(sk)->unacked_frames--;
1268 }
1269
1270 if (!l2cap_pi(sk)->unacked_frames)
1271 del_timer(&l2cap_pi(sk)->retrans_timer);
1272
1273 return;
1274 }
1275
1276 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1277 {
1278 struct l2cap_pinfo *pi = l2cap_pi(sk);
1279 int err;
1280
1281 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1282
1283 err = hci_send_acl(pi->conn->hcon, skb, 0);
1284 if (err < 0)
1285 kfree_skb(skb);
1286
1287 return err;
1288 }
1289
1290 static int l2cap_streaming_send(struct sock *sk)
1291 {
1292 struct sk_buff *skb, *tx_skb;
1293 struct l2cap_pinfo *pi = l2cap_pi(sk);
1294 u16 control, fcs;
1295 int err;
1296
1297 while ((skb = sk->sk_send_head)) {
1298 tx_skb = skb_clone(skb, GFP_ATOMIC);
1299
1300 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1301 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1302 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1303
1304 if (pi->fcs == L2CAP_FCS_CRC16) {
1305 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1306 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1307 }
1308
1309 err = l2cap_do_send(sk, tx_skb);
1310 if (err < 0) {
1311 l2cap_send_disconn_req(pi->conn, sk);
1312 return err;
1313 }
1314
1315 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1316
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1318 sk->sk_send_head = NULL;
1319 else
1320 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1321
1322 skb = skb_dequeue(TX_QUEUE(sk));
1323 kfree_skb(skb);
1324 }
1325 return 0;
1326 }
1327
1328 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1329 {
1330 struct l2cap_pinfo *pi = l2cap_pi(sk);
1331 struct sk_buff *skb, *tx_skb;
1332 u16 control, fcs;
1333 int err;
1334
1335 skb = skb_peek(TX_QUEUE(sk));
1336 do {
1337 if (bt_cb(skb)->tx_seq != tx_seq) {
1338 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1339 break;
1340 skb = skb_queue_next(TX_QUEUE(sk), skb);
1341 continue;
1342 }
1343
1344 if (pi->remote_max_tx &&
1345 bt_cb(skb)->retries == pi->remote_max_tx) {
1346 l2cap_send_disconn_req(pi->conn, sk);
1347 break;
1348 }
1349
1350 tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 bt_cb(skb)->retries++;
1352 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1353 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1354 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1355 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1356
1357 if (pi->fcs == L2CAP_FCS_CRC16) {
1358 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1359 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1360 }
1361
1362 err = l2cap_do_send(sk, tx_skb);
1363 if (err < 0) {
1364 l2cap_send_disconn_req(pi->conn, sk);
1365 return err;
1366 }
1367 break;
1368 } while(1);
1369 return 0;
1370 }
1371
1372 static int l2cap_ertm_send(struct sock *sk)
1373 {
1374 struct sk_buff *skb, *tx_skb;
1375 struct l2cap_pinfo *pi = l2cap_pi(sk);
1376 u16 control, fcs;
1377 int err, nsent = 0;
1378
1379 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1380 return 0;
1381
1382 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1383 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1384
1385 if (pi->remote_max_tx &&
1386 bt_cb(skb)->retries == pi->remote_max_tx) {
1387 l2cap_send_disconn_req(pi->conn, sk);
1388 break;
1389 }
1390
1391 tx_skb = skb_clone(skb, GFP_ATOMIC);
1392
1393 bt_cb(skb)->retries++;
1394
1395 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1396 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1397 control |= L2CAP_CTRL_FINAL;
1398 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1399 }
1400 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1401 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1402 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1403
1404
1405 if (pi->fcs == L2CAP_FCS_CRC16) {
1406 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1407 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1408 }
1409
1410 err = l2cap_do_send(sk, tx_skb);
1411 if (err < 0) {
1412 l2cap_send_disconn_req(pi->conn, sk);
1413 return err;
1414 }
1415 __mod_retrans_timer();
1416
1417 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1418 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1419
1420 pi->unacked_frames++;
1421 pi->frames_sent++;
1422
1423 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1424 sk->sk_send_head = NULL;
1425 else
1426 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1427
1428 nsent++;
1429 }
1430
1431 return nsent;
1432 }
1433
1434 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1435 {
1436 struct sock *sk = (struct sock *)pi;
1437 u16 control = 0;
1438
1439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1440
1441 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1442 control |= L2CAP_SUPER_RCV_NOT_READY;
1443 return l2cap_send_sframe(pi, control);
1444 } else if (l2cap_ertm_send(sk) == 0) {
1445 control |= L2CAP_SUPER_RCV_READY;
1446 return l2cap_send_sframe(pi, control);
1447 }
1448 return 0;
1449 }
1450
1451 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1452 {
1453 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1454 struct sk_buff **frag;
1455 int err, sent = 0;
1456
1457 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1458 return -EFAULT;
1459 }
1460
1461 sent += count;
1462 len -= count;
1463
1464 /* Continuation fragments (no L2CAP header) */
1465 frag = &skb_shinfo(skb)->frag_list;
1466 while (len) {
1467 count = min_t(unsigned int, conn->mtu, len);
1468
1469 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1470 if (!*frag)
1471 return -EFAULT;
1472 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1473 return -EFAULT;
1474
1475 sent += count;
1476 len -= count;
1477
1478 frag = &(*frag)->next;
1479 }
1480
1481 return sent;
1482 }
1483
1484 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1485 {
1486 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1487 struct sk_buff *skb;
1488 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1489 struct l2cap_hdr *lh;
1490
1491 BT_DBG("sk %p len %d", sk, (int)len);
1492
1493 count = min_t(unsigned int, (conn->mtu - hlen), len);
1494 skb = bt_skb_send_alloc(sk, count + hlen,
1495 msg->msg_flags & MSG_DONTWAIT, &err);
1496 if (!skb)
1497 return ERR_PTR(-ENOMEM);
1498
1499 /* Create L2CAP header */
1500 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1501 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1503 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1504
1505 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1506 if (unlikely(err < 0)) {
1507 kfree_skb(skb);
1508 return ERR_PTR(err);
1509 }
1510 return skb;
1511 }
1512
1513 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1514 {
1515 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1516 struct sk_buff *skb;
1517 int err, count, hlen = L2CAP_HDR_SIZE;
1518 struct l2cap_hdr *lh;
1519
1520 BT_DBG("sk %p len %d", sk, (int)len);
1521
1522 count = min_t(unsigned int, (conn->mtu - hlen), len);
1523 skb = bt_skb_send_alloc(sk, count + hlen,
1524 msg->msg_flags & MSG_DONTWAIT, &err);
1525 if (!skb)
1526 return ERR_PTR(-ENOMEM);
1527
1528 /* Create L2CAP header */
1529 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1530 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1531 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1532
1533 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1534 if (unlikely(err < 0)) {
1535 kfree_skb(skb);
1536 return ERR_PTR(err);
1537 }
1538 return skb;
1539 }
1540
/* Build one ERTM/Streaming I-frame PDU from user iovec data.
 * @control: initial control field value (SAR bits; sequence numbers are
 *           stamped later at transmission time).
 * @sdulen:  when non-zero, this is a SAR "start" PDU and the total SDU
 *           length is inserted after the control field.
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Extra 2 bytes for the SDU length field of a start PDU. */
	if (sdulen)
		hlen += 2;

	/* Extra 2 bytes reserved at the tail for the FCS. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real CRC is computed when the frame is
	 * actually transmitted. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1582
1583 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1584 {
1585 struct l2cap_pinfo *pi = l2cap_pi(sk);
1586 struct sk_buff *skb;
1587 struct sk_buff_head sar_queue;
1588 u16 control;
1589 size_t size = 0;
1590
1591 __skb_queue_head_init(&sar_queue);
1592 control = L2CAP_SDU_START;
1593 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1594 if (IS_ERR(skb))
1595 return PTR_ERR(skb);
1596
1597 __skb_queue_tail(&sar_queue, skb);
1598 len -= pi->max_pdu_size;
1599 size +=pi->max_pdu_size;
1600 control = 0;
1601
1602 while (len > 0) {
1603 size_t buflen;
1604
1605 if (len > pi->max_pdu_size) {
1606 control |= L2CAP_SDU_CONTINUE;
1607 buflen = pi->max_pdu_size;
1608 } else {
1609 control |= L2CAP_SDU_END;
1610 buflen = len;
1611 }
1612
1613 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1614 if (IS_ERR(skb)) {
1615 skb_queue_purge(&sar_queue);
1616 return PTR_ERR(skb);
1617 }
1618
1619 __skb_queue_tail(&sar_queue, skb);
1620 len -= buflen;
1621 size += buflen;
1622 control = 0;
1623 }
1624 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1625 if (sk->sk_send_head == NULL)
1626 sk->sk_send_head = sar_queue.next;
1627
1628 return size;
1629 }
1630
/* sendmsg() entry point for L2CAP sockets.  Builds PDUs appropriate to
 * the socket type and channel mode (connectionless, Basic, ERTM or
 * Streaming) and transmits them.  Returns bytes accepted or a negative
 * error code. */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb))
			err = PTR_ERR(skb);
		else
			err = l2cap_do_send(sk, skb);
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EINVAL;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		err = l2cap_do_send(sk, skb);
		if (!err)
			err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->max_pdu_size) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);
			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;
		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		/* Queued PDUs are now flushed by the mode's sender;
		 * ERTM may hold frames back (window full, remote busy). */
		if (pi->mode == L2CAP_MODE_STREAMING)
			err = l2cap_streaming_send(sk);
		else
			err = l2cap_ertm_send(sk);

		/* Data was queued: report the whole SDU as accepted. */
		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EINVAL;
	}

done:
	release_sock(sk);
	return err;
}
1723
1724 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1725 {
1726 struct sock *sk = sock->sk;
1727
1728 lock_sock(sk);
1729
1730 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1731 struct l2cap_conn_rsp rsp;
1732
1733 sk->sk_state = BT_CONFIG;
1734
1735 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1736 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1737 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1738 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1739 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1740 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1741
1742 release_sock(sk);
1743 return 0;
1744 }
1745
1746 release_sock(sk);
1747
1748 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1749 }
1750
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM). */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Pre-fill with current values so a short copy from
		 * userspace leaves the remaining fields unchanged. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* NOTE(review): opts.flush_to is read from userspace but
		 * never written back to l2cap_pi(sk)->flush_to — confirm
		 * whether the flush timeout is meant to be settable here.
		 * NOTE(review): opts.mode is stored without validation. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;
		l2cap_pi(sk)->fcs = opts.fcs;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode bit wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1807
1808 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1809 {
1810 struct sock *sk = sock->sk;
1811 struct bt_security sec;
1812 int len, err = 0;
1813 u32 opt;
1814
1815 BT_DBG("sk %p", sk);
1816
1817 if (level == SOL_L2CAP)
1818 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1819
1820 if (level != SOL_BLUETOOTH)
1821 return -ENOPROTOOPT;
1822
1823 lock_sock(sk);
1824
1825 switch (optname) {
1826 case BT_SECURITY:
1827 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1828 err = -EINVAL;
1829 break;
1830 }
1831
1832 sec.level = BT_SECURITY_LOW;
1833
1834 len = min_t(unsigned int, sizeof(sec), optlen);
1835 if (copy_from_user((char *) &sec, optval, len)) {
1836 err = -EFAULT;
1837 break;
1838 }
1839
1840 if (sec.level < BT_SECURITY_LOW ||
1841 sec.level > BT_SECURITY_HIGH) {
1842 err = -EINVAL;
1843 break;
1844 }
1845
1846 l2cap_pi(sk)->sec_level = sec.level;
1847 break;
1848
1849 case BT_DEFER_SETUP:
1850 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1851 err = -EINVAL;
1852 break;
1853 }
1854
1855 if (get_user(opt, (u32 __user *) optval)) {
1856 err = -EFAULT;
1857 break;
1858 }
1859
1860 bt_sk(sk)->defer_setup = opt;
1861 break;
1862
1863 default:
1864 err = -ENOPROTOOPT;
1865 break;
1866 }
1867
1868 release_sock(sk);
1869 return err;
1870 }
1871
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO). */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* Map the stored security level back to link-mode bits. */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		/* Connection info is valid once connected, or while a
		 * deferred incoming connection awaits acceptance. */
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1953
1954 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1955 {
1956 struct sock *sk = sock->sk;
1957 struct bt_security sec;
1958 int len, err = 0;
1959
1960 BT_DBG("sk %p", sk);
1961
1962 if (level == SOL_L2CAP)
1963 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1964
1965 if (level != SOL_BLUETOOTH)
1966 return -ENOPROTOOPT;
1967
1968 if (get_user(len, optlen))
1969 return -EFAULT;
1970
1971 lock_sock(sk);
1972
1973 switch (optname) {
1974 case BT_SECURITY:
1975 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1976 err = -EINVAL;
1977 break;
1978 }
1979
1980 sec.level = l2cap_pi(sk)->sec_level;
1981
1982 len = min_t(unsigned int, len, sizeof(sec));
1983 if (copy_to_user(optval, (char *) &sec, len))
1984 err = -EFAULT;
1985
1986 break;
1987
1988 case BT_DEFER_SETUP:
1989 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1990 err = -EINVAL;
1991 break;
1992 }
1993
1994 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1995 err = -EFAULT;
1996
1997 break;
1998
1999 default:
2000 err = -ENOPROTOOPT;
2001 break;
2002 }
2003
2004 release_sock(sk);
2005 return err;
2006 }
2007
2008 static int l2cap_sock_shutdown(struct socket *sock, int how)
2009 {
2010 struct sock *sk = sock->sk;
2011 int err = 0;
2012
2013 BT_DBG("sock %p, sk %p", sock, sk);
2014
2015 if (!sk)
2016 return 0;
2017
2018 lock_sock(sk);
2019 if (!sk->sk_shutdown) {
2020 sk->sk_shutdown = SHUTDOWN_MASK;
2021 l2cap_sock_clear_timer(sk);
2022 __l2cap_sock_close(sk, 0);
2023
2024 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2025 err = bt_sock_wait_state(sk, BT_CLOSED,
2026 sk->sk_lingertime);
2027 }
2028 release_sock(sk);
2029 return err;
2030 }
2031
2032 static int l2cap_sock_release(struct socket *sock)
2033 {
2034 struct sock *sk = sock->sk;
2035 int err;
2036
2037 BT_DBG("sock %p, sk %p", sock, sk);
2038
2039 if (!sk)
2040 return 0;
2041
2042 err = l2cap_sock_shutdown(sock, 2);
2043
2044 sock_orphan(sk);
2045 l2cap_sock_kill(sk);
2046 return err;
2047 }
2048
2049 static void l2cap_chan_ready(struct sock *sk)
2050 {
2051 struct sock *parent = bt_sk(sk)->parent;
2052
2053 BT_DBG("sk %p, parent %p", sk, parent);
2054
2055 l2cap_pi(sk)->conf_state = 0;
2056 l2cap_sock_clear_timer(sk);
2057
2058 if (!parent) {
2059 /* Outgoing channel.
2060 * Wake up socket sleeping on connect.
2061 */
2062 sk->sk_state = BT_CONNECTED;
2063 sk->sk_state_change(sk);
2064 } else {
2065 /* Incoming channel.
2066 * Wake up socket sleeping on accept.
2067 */
2068 parent->sk_data_ready(parent, 0);
2069 }
2070 }
2071
2072 /* Copy frame to all raw sockets on that connection */
2073 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2074 {
2075 struct l2cap_chan_list *l = &conn->chan_list;
2076 struct sk_buff *nskb;
2077 struct sock *sk;
2078
2079 BT_DBG("conn %p", conn);
2080
2081 read_lock(&l->lock);
2082 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2083 if (sk->sk_type != SOCK_RAW)
2084 continue;
2085
2086 /* Don't send frame to the socket it came from */
2087 if (skb->sk == sk)
2088 continue;
2089 nskb = skb_clone(skb, GFP_ATOMIC);
2090 if (!nskb)
2091 continue;
2092
2093 if (sock_queue_rcv_skb(sk, nskb))
2094 kfree_skb(nskb);
2095 }
2096 read_unlock(&l->lock);
2097 }
2098
2099 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command PDU (header + command header +
 * payload), fragmenting the payload across chained skbs when it does
 * not fit in one ACL MTU.  Returns NULL on allocation failure. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Signalling traffic always goes on the signalling channel. */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	/* Copy as much payload as fits after the headers. */
	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any chained fragments. */
	kfree_skb(skb);
	return NULL;
}
2158
2159 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2160 {
2161 struct l2cap_conf_opt *opt = *ptr;
2162 int len;
2163
2164 len = L2CAP_CONF_OPT_SIZE + opt->len;
2165 *ptr += len;
2166
2167 *type = opt->type;
2168 *olen = opt->len;
2169
2170 switch (opt->len) {
2171 case 1:
2172 *val = *((u8 *) opt->val);
2173 break;
2174
2175 case 2:
2176 *val = __le16_to_cpu(*((__le16 *) opt->val));
2177 break;
2178
2179 case 4:
2180 *val = __le32_to_cpu(*((__le32 *) opt->val));
2181 break;
2182
2183 default:
2184 *val = (unsigned long) opt->val;
2185 break;
2186 }
2187
2188 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2189 return len;
2190 }
2191
2192 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2193 {
2194 struct l2cap_conf_opt *opt = *ptr;
2195
2196 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2197
2198 opt->type = type;
2199 opt->len = len;
2200
2201 switch (len) {
2202 case 1:
2203 *((u8 *) opt->val) = val;
2204 break;
2205
2206 case 2:
2207 *((__le16 *) opt->val) = cpu_to_le16(val);
2208 break;
2209
2210 case 4:
2211 *((__le32 *) opt->val) = cpu_to_le32(val);
2212 break;
2213
2214 default:
2215 memcpy(opt->val, (void *) val, len);
2216 break;
2217 }
2218
2219 *ptr += L2CAP_CONF_OPT_SIZE + len;
2220 }
2221
2222 static inline void l2cap_ertm_init(struct sock *sk)
2223 {
2224 l2cap_pi(sk)->expected_ack_seq = 0;
2225 l2cap_pi(sk)->unacked_frames = 0;
2226 l2cap_pi(sk)->buffer_seq = 0;
2227 l2cap_pi(sk)->num_to_ack = 0;
2228 l2cap_pi(sk)->frames_sent = 0;
2229
2230 setup_timer(&l2cap_pi(sk)->retrans_timer,
2231 l2cap_retrans_timeout, (unsigned long) sk);
2232 setup_timer(&l2cap_pi(sk)->monitor_timer,
2233 l2cap_monitor_timeout, (unsigned long) sk);
2234
2235 __skb_queue_head_init(SREJ_QUEUE(sk));
2236 }
2237
2238 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2239 {
2240 u32 local_feat_mask = l2cap_feat_mask;
2241 if (enable_ertm)
2242 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2243
2244 switch (mode) {
2245 case L2CAP_MODE_ERTM:
2246 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2247 case L2CAP_MODE_STREAMING:
2248 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2249 default:
2250 return 0x00;
2251 }
2252 }
2253
2254 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2255 {
2256 switch (mode) {
2257 case L2CAP_MODE_STREAMING:
2258 case L2CAP_MODE_ERTM:
2259 if (l2cap_mode_supported(mode, remote_feat_mask))
2260 return mode;
2261 /* fall through */
2262 default:
2263 return L2CAP_MODE_BASIC;
2264 }
2265 }
2266
/* Build the option list for an outgoing Configure Request into @data.
 * On the first request the channel mode is chosen (possibly falling
 * back via l2cap_select_mode()).  Returns the number of bytes used. */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only on the very first config round. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was requested explicitly: it is mandatory. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU is the only option needed; omit it at the default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit = max_transmit;
		/* Timeouts are filled in by the responder. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Leave room for the ACL/FCS overhead. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = pi->conn->mtu - 10;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS when allowed on both sides. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Window/retransmit fields are unused in streaming mode. */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = pi->conn->mtu - 10;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2354
/* Parse the peer's Configure Request (stashed in pi->conf_req) and
 * build our Configure Response into @data.  Returns the response
 * length, or -ECONNREFUSED when the configuration cannot be accepted. */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: decode every option the peer sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			/* Hint options may be ignored; anything else
			 * unknown is echoed back as rejected. */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode selection happens only on the very first config round. */
	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was requested explicitly: it is mandatory. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	/* Mode mismatch: propose our mode once, refuse on the retry. */
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			pi->max_pdu_size = rfc.max_pdu_size;

			/* The responder dictates the timeouts. */
			rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
			rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_tx_win = rfc.txwin_size;
			pi->max_pdu_size = rfc.max_pdu_size;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid   = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2497
2498 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2499 {
2500 struct l2cap_pinfo *pi = l2cap_pi(sk);
2501 struct l2cap_conf_req *req = data;
2502 void *ptr = req->data;
2503 int type, olen;
2504 unsigned long val;
2505 struct l2cap_conf_rfc rfc;
2506
2507 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2508
2509 while (len >= L2CAP_CONF_OPT_SIZE) {
2510 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2511
2512 switch (type) {
2513 case L2CAP_CONF_MTU:
2514 if (val < L2CAP_DEFAULT_MIN_MTU) {
2515 *result = L2CAP_CONF_UNACCEPT;
2516 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2517 } else
2518 pi->omtu = val;
2519 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2520 break;
2521
2522 case L2CAP_CONF_FLUSH_TO:
2523 pi->flush_to = val;
2524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2525 2, pi->flush_to);
2526 break;
2527
2528 case L2CAP_CONF_RFC:
2529 if (olen == sizeof(rfc))
2530 memcpy(&rfc, (void *)val, olen);
2531
2532 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2533 rfc.mode != pi->mode)
2534 return -ECONNREFUSED;
2535
2536 pi->mode = rfc.mode;
2537 pi->fcs = 0;
2538
2539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2540 sizeof(rfc), (unsigned long) &rfc);
2541 break;
2542 }
2543 }
2544
2545 if (*result == L2CAP_CONF_SUCCESS) {
2546 switch (rfc.mode) {
2547 case L2CAP_MODE_ERTM:
2548 pi->remote_tx_win = rfc.txwin_size;
2549 pi->retrans_timeout = rfc.retrans_timeout;
2550 pi->monitor_timeout = rfc.monitor_timeout;
2551 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2552 break;
2553 case L2CAP_MODE_STREAMING:
2554 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2555 break;
2556 }
2557 }
2558
2559 req->dcid = cpu_to_le16(pi->dcid);
2560 req->flags = cpu_to_le16(0x0000);
2561
2562 return ptr - data;
2563 }
2564
2565 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2566 {
2567 struct l2cap_conf_rsp *rsp = data;
2568 void *ptr = rsp->data;
2569
2570 BT_DBG("sk %p", sk);
2571
2572 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2573 rsp->result = cpu_to_le16(result);
2574 rsp->flags = cpu_to_le16(flags);
2575
2576 return ptr - data;
2577 }
2578
2579 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2580 {
2581 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2582
2583 if (rej->reason != 0x0000)
2584 return 0;
2585
2586 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2587 cmd->ident == conn->info_ident) {
2588 del_timer(&conn->info_timer);
2589
2590 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2591 conn->info_ident = 0;
2592
2593 l2cap_conn_start(conn);
2594 }
2595
2596 return 0;
2597 }
2598
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Handle an incoming Connection Request: find a listening socket
	 * for the PSM, create a child socket for the new channel and answer
	 * with a Connection Response (success, pending, or a failure). */
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	/* PSM 0x0001 is SDP, which is exempt from the link-mode check.
	 * NOTE(review): 0x05 presumably maps to an authentication-failure
	 * disconnect reason — confirm against the HCI error code list. */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;	/* our destination CID is the peer's source CID */

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept/reject the channel:
				 * report it as pending authorization and wake
				 * up the listener. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange not finished: answer "pending" and
		 * kick off an information request below. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	/* NOTE(review): parent appears to be returned locked by
	 * l2cap_get_sock_by_psm() and is released here — confirm. */
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* First channel on this link: query the peer's feature mask
		 * before completing the connection. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2715
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Handle the peer's Connection Response to a request we sent: on
	 * success start configuration, on "pending" keep waiting, and on
	 * any other result tear the channel down. */
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];	/* scratch buffer for the configure request */

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* scid may be 0 while the connection is still pending; fall back
	 * to matching on the command identifier in that case.
	 * NOTE(review): both lookups appear to return the socket locked
	 * (released via bh_unlock_sock below) — confirm. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Channel accepted: remember the remote CID and send our
		 * configuration request immediately. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Any other result code is a refusal. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2766
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	/* Handle a Configure Request from the peer.  Option lists may span
	 * several requests (continuation flag set), so fragments accumulate
	 * in conf_req[] and are parsed only once the final one arrives. */
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unresolvable option conflict: give up on the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* The channel becomes usable only when both directions have been
	 * configured (OUTPUT_DONE: we accepted theirs; INPUT_DONE: they
	 * accepted ours). */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Enable CRC16 FCS unless both sides agreed to drop it. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We answered the peer but have not sent our own request yet. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2852
2853 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2854 {
2855 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2856 u16 scid, flags, result;
2857 struct sock *sk;
2858
2859 scid = __le16_to_cpu(rsp->scid);
2860 flags = __le16_to_cpu(rsp->flags);
2861 result = __le16_to_cpu(rsp->result);
2862
2863 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2864 scid, flags, result);
2865
2866 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2867 if (!sk)
2868 return 0;
2869
2870 switch (result) {
2871 case L2CAP_CONF_SUCCESS:
2872 break;
2873
2874 case L2CAP_CONF_UNACCEPT:
2875 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2876 int len = cmd->len - sizeof(*rsp);
2877 char req[64];
2878
2879 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2880 l2cap_send_disconn_req(conn, sk);
2881 goto done;
2882 }
2883
2884 /* throw out any old stored conf requests */
2885 result = L2CAP_CONF_SUCCESS;
2886 len = l2cap_parse_conf_rsp(sk, rsp->data,
2887 len, req, &result);
2888 if (len < 0) {
2889 l2cap_send_disconn_req(conn, sk);
2890 goto done;
2891 }
2892
2893 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2894 L2CAP_CONF_REQ, len, req);
2895 l2cap_pi(sk)->num_conf_req++;
2896 if (result != L2CAP_CONF_SUCCESS)
2897 goto done;
2898 break;
2899 }
2900
2901 default:
2902 sk->sk_state = BT_DISCONN;
2903 sk->sk_err = ECONNRESET;
2904 l2cap_sock_set_timer(sk, HZ * 5);
2905 l2cap_send_disconn_req(conn, sk);
2906 goto done;
2907 }
2908
2909 if (flags & 0x01)
2910 goto done;
2911
2912 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2913
2914 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2915 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2916 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2917 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2918
2919 sk->sk_state = BT_CONNECTED;
2920 l2cap_pi(sk)->next_tx_seq = 0;
2921 l2cap_pi(sk)->expected_tx_seq = 0;
2922 __skb_queue_head_init(TX_QUEUE(sk));
2923 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2924 l2cap_ertm_init(sk);
2925
2926 l2cap_chan_ready(sk);
2927 }
2928
2929 done:
2930 bh_unlock_sock(sk);
2931 return 0;
2932 }
2933
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Handle a Disconnection Request from the peer: acknowledge it,
	 * flush pending transmissions and destroy the channel. */
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's destination CID is our source CID. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* ERTM keeps extra retransmission state and timers that
		 * must be torn down as well. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2970
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Handle the response to a Disconnection Request we sent: the peer
	 * has confirmed, so flush remaining traffic and free the channel. */
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop ERTM retransmission state and timers too. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	/* err == 0: this was a locally requested, orderly shutdown. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3000
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Answer an Information Request: report our feature mask or the
	 * fixed-channel bitmap; any other type gets "not supported". */
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];	/* 4-byte rsp header + 32-bit feature mask */
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise the enhanced modes only when the module
		 * parameter enables them. */
		if (enable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];	/* 4-byte rsp header + 8-byte channel bitmap */
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Bitmap payload starts right after the type/result header. */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3040
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Handle the peer's Information Response.  After the feature mask
	 * arrives we optionally query the fixed channels, then release any
	 * connection attempts that were waiting on this exchange. */
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones
			 * before declaring the exchange done. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3079
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* Dispatch every signaling command packed into this C-frame.
	 * Commands that fail are answered with a Command Reject.
	 * Consumes the skb. */
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Identifier 0 is reserved; a length running past the end
		 * of the frame means it is corrupted. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the received payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the frame. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3169
3170 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3171 {
3172 u16 our_fcs, rcv_fcs;
3173 int hdr_size = L2CAP_HDR_SIZE + 2;
3174
3175 if (pi->fcs == L2CAP_FCS_CRC16) {
3176 skb_trim(skb, skb->len - 2);
3177 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3178 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3179
3180 if (our_fcs != rcv_fcs)
3181 return -EINVAL;
3182 }
3183 return 0;
3184 }
3185
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	/* Answer a poll (P-bit) from the peer: report local busy with an
	 * RNR, otherwise flush pending I-frames, falling back to an RR if
	 * none were sent, so the peer always receives its F-bit reply. */
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* We cannot accept data right now: RNR with the F-bit. */
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	l2cap_ertm_send(sk);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		/* No I-frame carried the F-bit: answer with an RR. */
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3213
static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	/* Insert an out-of-sequence I-frame into the SREJ queue, kept
	 * sorted by tx_seq so frames can later be delivered in order.
	 * NOTE(review): the comparison uses raw tx_seq values and does not
	 * account for modulo-64 wraparound — confirm callers only queue
	 * frames from a single window. */
	struct sk_buff *next_skb;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return;
	}

	/* Walk to the first queued frame with a higher sequence number
	 * and insert in front of it; otherwise append at the tail. */
	do {
		if (bt_cb(next_skb)->tx_seq > tx_seq) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);
}
3240
3241 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3242 {
3243 struct l2cap_pinfo *pi = l2cap_pi(sk);
3244 struct sk_buff *_skb;
3245 int err = -EINVAL;
3246
3247 switch (control & L2CAP_CTRL_SAR) {
3248 case L2CAP_SDU_UNSEGMENTED:
3249 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3250 kfree_skb(pi->sdu);
3251 break;
3252 }
3253
3254 err = sock_queue_rcv_skb(sk, skb);
3255 if (!err)
3256 return 0;
3257
3258 break;
3259
3260 case L2CAP_SDU_START:
3261 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3262 kfree_skb(pi->sdu);
3263 break;
3264 }
3265
3266 pi->sdu_len = get_unaligned_le16(skb->data);
3267 skb_pull(skb, 2);
3268
3269 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3270 if (!pi->sdu) {
3271 err = -ENOMEM;
3272 break;
3273 }
3274
3275 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3276
3277 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3278 pi->partial_sdu_len = skb->len;
3279 err = 0;
3280 break;
3281
3282 case L2CAP_SDU_CONTINUE:
3283 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3284 break;
3285
3286 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3287
3288 pi->partial_sdu_len += skb->len;
3289 if (pi->partial_sdu_len > pi->sdu_len)
3290 kfree_skb(pi->sdu);
3291 else
3292 err = 0;
3293
3294 break;
3295
3296 case L2CAP_SDU_END:
3297 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3298 break;
3299
3300 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3301
3302 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3303 pi->partial_sdu_len += skb->len;
3304
3305 if (pi->partial_sdu_len > pi->imtu)
3306 goto drop;
3307
3308 if (pi->partial_sdu_len == pi->sdu_len) {
3309 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3310 err = sock_queue_rcv_skb(sk, _skb);
3311 if (err < 0)
3312 kfree_skb(_skb);
3313 }
3314 err = 0;
3315
3316 drop:
3317 kfree_skb(pi->sdu);
3318 break;
3319 }
3320
3321 kfree_skb(skb);
3322 return err;
3323 }
3324
3325 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3326 {
3327 struct sk_buff *skb;
3328 u16 control = 0;
3329
3330 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3331 if (bt_cb(skb)->tx_seq != tx_seq)
3332 break;
3333
3334 skb = skb_dequeue(SREJ_QUEUE(sk));
3335 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3336 l2cap_sar_reassembly_sdu(sk, skb, control);
3337 l2cap_pi(sk)->buffer_seq_srej =
3338 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3339 tx_seq++;
3340 }
3341 }
3342
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	/* The frame tx_seq finally arrived: drop its pending SREJ entry,
	 * and re-send an SREJ for every entry queued before it, rotating
	 * each one to the tail of the list. */
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* Found the satisfied request — remove and stop. */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
3362
3363 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3364 {
3365 struct l2cap_pinfo *pi = l2cap_pi(sk);
3366 struct srej_list *new;
3367 u16 control;
3368
3369 while (tx_seq != pi->expected_tx_seq) {
3370 control = L2CAP_SUPER_SELECT_REJECT;
3371 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3372 l2cap_send_sframe(pi, control);
3373
3374 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3375 new->tx_seq = pi->expected_tx_seq++;
3376 list_add_tail(&new->list, SREJ_LIST(sk));
3377 }
3378 pi->expected_tx_seq++;
3379 }
3380
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	/* Process a received ERTM I-frame: acknowledge outstanding frames,
	 * deliver in-sequence data, and run the SREJ recovery procedure
	 * for out-of-sequence frames. */
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control) {
		/* The F-bit answers our poll: stop the monitor timer and
		 * leave the wait-for-F state. */
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* ReqSeq acknowledges everything before it. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* This frame fills the oldest gap: buffer it, then
			 * push any now-contiguous frames up the stack. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete: resume normal flow. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* If we already SREJ-ed this sequence number this is
			 * a retransmission; otherwise new gaps opened and
			 * more SREJs must go out. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Still recovering: buffer even the in-sequence frame so
		 * delivery order is preserved. */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			/* F-bit acknowledged our REJ: retransmit from the
			 * acknowledged point. */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK-th received frame. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1)
		l2cap_send_ack(pi);

	return 0;
}
3477
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	/* Process a received ERTM S-frame (RR, REJ, SREJ or RNR).
	 * Consumes the skb. */
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control) {
		/* The F-bit answers our poll: stop waiting for it. */
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Peer polls us: answer with I-frames or RR/RNR. */
			l2cap_send_i_or_rr_or_rnr(sk);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		} else if (rx_control & L2CAP_CTRL_FINAL) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			/* Skip the retransmission if this F-bit was already
			 * consumed by a REJ. */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}

		} else {
			/* Plain acknowledgement. */
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;

	case L2CAP_SUPER_REJECT:
		/* Peer rejects from tx_seq onwards: go back and resend. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		pi->expected_ack_seq = __get_reqseq(rx_control);
		l2cap_drop_acked_frames(sk);

		if (rx_control & L2CAP_CTRL_FINAL) {
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}
		} else {
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);

			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				/* Remember so the later F-bit reply is not
				 * acted on a second time. */
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_REJ_ACT;
			}
		}

		break;

	case L2CAP_SUPER_SELECT_REJECT:
		/* Peer asks for one specific frame to be sent again. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (rx_control & L2CAP_CTRL_POLL) {
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);
			l2cap_retransmit_frame(sk, tx_seq);
			l2cap_ertm_send(sk);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Skip the retransmission if this SREJ was already
			 * handled when the poll was answered. */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
					pi->srej_save_reqseq == tx_seq)
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			else
				l2cap_retransmit_frame(sk, tx_seq);
		}
		else {
			l2cap_retransmit_frame(sk, tx_seq);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		/* Peer is busy: stop retransmitting until it recovers. */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Their poll still demands an F-bit answer. */
			u16 control = L2CAP_CTRL_FINAL;
			l2cap_send_rr_or_rnr(pi, control);
		}
		break;
	}

	kfree_skb(skb);
	return 0;
}
3595
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	/* Entry point for data arriving on a connection-oriented channel.
	 * Validates the frame against the channel's mode and hands it to
	 * the basic, ERTM or streaming receive path.  Consumes the skb. */
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Strip the 16-bit control field; the information payload
		 * length excludes the optional SAR length field and FCS. */
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control)) {
			if (len < 4)
				goto drop;

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames carry no information payload. */
			if (len != 0)
				goto drop;

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode only carries I-frames. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || len < 4
				|| __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming never retransmits: just resynchronize the
		 * expected sequence number when frames were lost. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3709
3710 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3711 {
3712 struct sock *sk;
3713
3714 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3715 if (!sk)
3716 goto drop;
3717
3718 BT_DBG("sk %p, len %d", sk, skb->len);
3719
3720 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3721 goto drop;
3722
3723 if (l2cap_pi(sk)->imtu < skb->len)
3724 goto drop;
3725
3726 if (!sock_queue_rcv_skb(sk, skb))
3727 goto done;
3728
3729 drop:
3730 kfree_skb(skb);
3731
3732 done:
3733 if (sk)
3734 bh_unlock_sock(sk);
3735 return 0;
3736 }
3737
3738 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3739 {
3740 struct l2cap_hdr *lh = (void *) skb->data;
3741 u16 cid, len;
3742 __le16 psm;
3743
3744 skb_pull(skb, L2CAP_HDR_SIZE);
3745 cid = __le16_to_cpu(lh->cid);
3746 len = __le16_to_cpu(lh->len);
3747
3748 if (len != skb->len) {
3749 kfree_skb(skb);
3750 return;
3751 }
3752
3753 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3754
3755 switch (cid) {
3756 case L2CAP_CID_SIGNALING:
3757 l2cap_sig_channel(conn, skb);
3758 break;
3759
3760 case L2CAP_CID_CONN_LESS:
3761 psm = get_unaligned_le16(skb->data);
3762 skb_pull(skb, 2);
3763 l2cap_conless_channel(conn, psm, skb);
3764 break;
3765
3766 default:
3767 l2cap_data_channel(conn, cid, skb);
3768 break;
3769 }
3770 }
3771
3772 /* ---- L2CAP interface with lower layer (HCI) ---- */
3773
3774 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3775 {
3776 int exact = 0, lm1 = 0, lm2 = 0;
3777 register struct sock *sk;
3778 struct hlist_node *node;
3779
3780 if (type != ACL_LINK)
3781 return 0;
3782
3783 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3784
3785 /* Find listening sockets and check their link_mode */
3786 read_lock(&l2cap_sk_list.lock);
3787 sk_for_each(sk, node, &l2cap_sk_list.head) {
3788 if (sk->sk_state != BT_LISTEN)
3789 continue;
3790
3791 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3792 lm1 |= HCI_LM_ACCEPT;
3793 if (l2cap_pi(sk)->role_switch)
3794 lm1 |= HCI_LM_MASTER;
3795 exact++;
3796 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3797 lm2 |= HCI_LM_ACCEPT;
3798 if (l2cap_pi(sk)->role_switch)
3799 lm2 |= HCI_LM_MASTER;
3800 }
3801 }
3802 read_unlock(&l2cap_sk_list.lock);
3803
3804 return exact ? lm1 : lm2;
3805 }
3806
3807 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3808 {
3809 struct l2cap_conn *conn;
3810
3811 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3812
3813 if (hcon->type != ACL_LINK)
3814 return 0;
3815
3816 if (!status) {
3817 conn = l2cap_conn_add(hcon, status);
3818 if (conn)
3819 l2cap_conn_ready(conn);
3820 } else
3821 l2cap_conn_del(hcon, bt_err(status));
3822
3823 return 0;
3824 }
3825
3826 static int l2cap_disconn_ind(struct hci_conn *hcon)
3827 {
3828 struct l2cap_conn *conn = hcon->l2cap_data;
3829
3830 BT_DBG("hcon %p", hcon);
3831
3832 if (hcon->type != ACL_LINK || !conn)
3833 return 0x13;
3834
3835 return conn->disc_reason;
3836 }
3837
3838 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3839 {
3840 BT_DBG("hcon %p reason %d", hcon, reason);
3841
3842 if (hcon->type != ACL_LINK)
3843 return 0;
3844
3845 l2cap_conn_del(hcon, bt_err(reason));
3846
3847 return 0;
3848 }
3849
3850 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3851 {
3852 if (sk->sk_type != SOCK_SEQPACKET)
3853 return;
3854
3855 if (encrypt == 0x00) {
3856 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3857 l2cap_sock_clear_timer(sk);
3858 l2cap_sock_set_timer(sk, HZ * 5);
3859 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3860 __l2cap_sock_close(sk, ECONNREFUSED);
3861 } else {
3862 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3863 l2cap_sock_clear_timer(sk);
3864 }
3865 }
3866
/* HCI callback: the security procedure (authentication/encryption) on
 * hcon completed with the given status.  Walk every L2CAP channel on
 * the connection and advance its state machine accordingly. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting for the HCI connection itself are
		 * not ready to react to a security confirmation. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Already established/configuring channels only need their
		 * encryption requirements re-checked. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the Connection Request
				 * this channel was waiting on. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer so the
				 * socket gets closed soon. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection that was deferred pending
			 * security: answer the remote's Connection Request
			 * with success or a security block. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3939
/* HCI callback: one ACL data fragment arrived for hcon.  Reassembles
 * fragments (ACL_START + continuations) into complete L2CAP frames and
 * hands them to l2cap_recv_frame().  Consumes skb on every path. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start fragment while reassembly is still in progress
		 * means the previous frame was truncated: discard it and
		 * mark the connection unreliable. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 16-bit length field of the L2CAP
		 * basic header to know the total frame size. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		/* rx_len tracks how many bytes are still outstanding. */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* A continuation fragment with no reassembly pending. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the preallocated frame buffer. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4027
4028 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4029 {
4030 struct sock *sk;
4031 struct hlist_node *node;
4032
4033 read_lock_bh(&l2cap_sk_list.lock);
4034
4035 sk_for_each(sk, node, &l2cap_sk_list.head) {
4036 struct l2cap_pinfo *pi = l2cap_pi(sk);
4037
4038 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4039 batostr(&bt_sk(sk)->src),
4040 batostr(&bt_sk(sk)->dst),
4041 sk->sk_state, __le16_to_cpu(pi->psm),
4042 pi->scid, pi->dcid,
4043 pi->imtu, pi->omtu, pi->sec_level);
4044 }
4045
4046 read_unlock_bh(&l2cap_sk_list.lock);
4047
4048 return 0;
4049 }
4050
/* debugfs open handler: bind the seq_file show callback. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4055
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4062
/* debugfs entry created in l2cap_init(), removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4064
/* Socket-layer entry points for BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4084
/* Registered with the Bluetooth socket layer to create L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4090
/* Callbacks registered with the HCI core; this is how connection,
 * disconnection, security and ACL-data events reach L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4101
/* Module init: register the protocol, the socket family and the HCI
 * hooks, in that order; unwind on failure.  The debugfs file is
 * optional and only logged on failure. */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		/* Socket family was already registered: undo it here,
		 * the error label only unwinds proto_register(). */
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}
4139
/* Module exit: remove the debugfs file and unregister everything that
 * l2cap_init() registered. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4152
void l2cap_load(void)
{
	/* Intentionally empty.  Exported only so that modules which use
	 * L2CAP sockets (but no other symbol from this module) can
	 * reference it and trigger automatic module loading. */
}
EXPORT_SYMBOL(l2cap_load);
4161
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable module parameters (mode 0644: root-writable via sysfs). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Allow automatic loading by Bluetooth protocol number 0 (L2CAP). */
MODULE_ALIAS("bt-proto-0");