/*
 * Source: net/bluetooth/l2cap.c from mirror_ubuntu-bionic-kernel.git
 * (git.proxmox.com mirror), at commit "Bluetooth: Check the tx_window
 * size on setsockopt".
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
#define VERSION "2.14"

/* ERTM/streaming channel modes are only offered when this is non-zero
 * (checked in connect/listen and used by l2cap_sock_init). */
static int enable_ertm = 0;

/* Local feature mask and fixed-channel map; consumed by code outside
 * this chunk (information request/response handling, presumably). */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Workqueue presumably serviced by l2cap_busy_work() (declared below);
 * its creation/use is outside this chunk. */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, guarded by its own rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/*
 * Socket timer callback (armed in l2cap_sock_alloc()).  Runs in timer
 * (softirq) context, hence bh_lock_sock() rather than lock_sock().
 * Picks an error reason from how far the connection got, closes the
 * channel and drops the timer's socket reference.
 */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
105
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
107 {
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
110 }
111
/* Stop a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
117
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 {
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
124 break;
125 }
126 return s;
127 }
128
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 {
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
135 }
136 return s;
137 }
138
/* Find channel with given SCID.
 * Returns the socket locked with bh_lock_sock(), or NULL. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		/* lock it before dropping the list lock, so the channel
		 * cannot disappear under the caller */
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
151
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 {
154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
158 }
159 return s;
160 }
161
/* Find channel with the given signalling identifier.
 * Returns the socket locked with bh_lock_sock(), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
172
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
174 {
175 u16 cid = L2CAP_CID_DYN_START;
176
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
179 return cid;
180 }
181
182 return 0;
183 }
184
/* Push @sk onto the front of the connection's channel list.  Takes a
 * reference on the socket (dropped by l2cap_chan_unlink()).  Caller
 * must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
196
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link().  Unlike the link helper, this takes
 * the list lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
213
/* Attach @sk to @conn as a channel and assign its CIDs according to
 * the socket type.  Caller holds the channel list write lock (see
 * l2cap_chan_add()).  A non-NULL @parent queues the child on the
 * listener's accept queue. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason (presumably HCI "remote user
	 * terminated connection" — same value set in l2cap_conn_add()) */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
245
/* Delete channel.
 * Must be called on the locked socket.  Detaches the channel from its
 * connection, marks the socket closed/zapped, wakes whoever waits on
 * it and discards all queued data and ERTM state. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: detach from the listener and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Tear down all ERTM state: timers, queues, SREJ list */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
294
/* Service level security: map the socket's security level to an HCI
 * authentication requirement and ask the HCI layer to enforce it on
 * the underlying ACL link.  PSM 0x0001 (SDP) never bonds; a LOW level
 * on it is raised to BT_SECURITY_SDP. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
326
/* Allocate the next signalling identifier for @conn.
 * Get next available identificator.
 * 1 - 128 are used by kernel.
 * 129 - 199 are reserved.
 * 200 - 254 are used by utilities like l2ping, etc.
 * conn->lock keeps concurrent senders from getting the same ident. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;	/* wrap within the kernel range */

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
348
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
350 {
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
352
353 BT_DBG("code 0x%2.2x", code);
354
355 if (!skb)
356 return;
357
358 hci_send_acl(conn->hcon, skb, 0);
359 }
360
/* Build and transmit one ERTM S-frame (supervisory frame) on the
 * channel.  @control is the 16-bit control field; the F and P bits are
 * OR-ed in here when the corresponding conn_state flags are pending,
 * and cleared once consumed. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control field */

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the CRC16 trailer */

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	/* An S-frame always fits the whole frame unless the ACL MTU is
	 * absurdly small */
	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the 2-byte FCS itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
406
/* Send an RR (receiver ready) S-frame, or RNR (receiver not ready)
 * while local busy is asserted; always reports our buffer_seq as the
 * acknowledged sequence number. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* remember we told the peer we are busy */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
419
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
421 {
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
423 }
424
/* Push connection establishment for @sk forward.  If the remote
 * feature mask is not known yet, send an information request first and
 * let the info response (or its timeout) restart the connect. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;	/* info exchange still in flight */

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
458
/* Send a disconnect request for the channel and move the socket to
 * BT_DISCONN.  Pending TX data and ERTM timers are discarded first. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
}
481
482 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and push connection setup forward:
 * outgoing channels (BT_CONNECT) get a connect request once security
 * is satisfied; incoming ones (BT_CONNECT2) get their pending connect
 * response, possibly deferred for authentication or authorization. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented sockets take part */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk) &&
					__l2cap_no_conn_pending(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace decides: tell the peer
					 * authorization is pending and wake
					 * the listener */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
545
/* The ACL link is up: connect raw/dgram sockets immediately and start
 * the L2CAP connect procedure for connection-oriented ones. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No channel setup needed for these types */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
571
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Only sockets that requested reliable delivery are flagged */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
589
/* Info-request timer expired: mark the feature exchange as finished so
 * pending channels can proceed without the peer's feature mask. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
599
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  A non-zero @status means link setup failed, so nothing is
 * allocated.  Runs in atomic context (GFP_ATOMIC allocation). */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;	/* unknown until the info exchange runs */

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason (presumably HCI 0x13, remote user
	 * terminated connection) */
	conn->disc_reason = 0x13;

	return conn;
}
632
/* Tear down an L2CAP connection: free any partially reassembled frame,
 * close every channel with @err, stop the info timer and free @conn. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);	/* needs the unlocked socket */
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
659
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
667
668 /* ---- Socket interface ---- */
669 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
670 {
671 struct sock *sk;
672 struct hlist_node *node;
673 sk_for_each(sk, node, &l2cap_sk_list.head)
674 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
675 goto found;
676 sk = NULL;
677 found:
678 return sk;
679 }
680
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match if one exists,
 * otherwise a BDADDR_ANY wildcard match.  Caller must hold
 * l2cap_sk_list.lock. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL iff the loop broke out early (exact match) */
	return node ? sk : sk1;
}
705
/* Find socket with given address (psm, src).
 * Returns the socket locked with bh_lock_sock(), or NULL. */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
718
/* Socket destructor: drop anything still queued for RX or TX. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
726
/* Shut down a listening socket: close every not-yet-accepted child,
 * then mark the listener itself closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
740
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan: unlink from the global socket list and drop
	 * the list's reference */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
756
/* State-dependent close.  Caller holds the socket lock.  Sockets that
 * need a signalling exchange (disconnect request, or a reject of a
 * pending incoming connection) stay alive until the peer answers or
 * the timer armed here fires. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Give the peer sndtimeo to answer the
			 * disconnect request */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Closing a deferred-setup socket means userspace
			 * rejected the incoming connection */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
810
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
820
/* Initialize per-socket L2CAP state.  A child accepted from @parent
 * inherits the listener's configuration; a fresh socket gets defaults
 * (ERTM only when enable_ertm is set and the type is SOCK_STREAM). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;	/* established during configuration */
		if (enable_ertm && sk->sk_type == SOCK_STREAM)
			pi->mode = L2CAP_MODE_ERTM;
		else
			pi->mode = L2CAP_MODE_BASIC;
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
863
/* Protocol descriptor; obj_size makes sk_alloc() size the socket for
 * struct l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
869
/* Allocate and minimally initialize an L2CAP socket: sets the
 * destructor and send timeout, arms sk_timer with l2cap_sock_timeout()
 * and links the socket into the global list. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
894
/* socket(2) backend: validate the socket type, enforce CAP_NET_RAW for
 * userspace raw sockets, then allocate and initialize the sock. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
920
/* bind(2) backend: record source address and PSM.  Binding to an
 * explicit CID is not supported; PSMs below 0x1001 require
 * CAP_NET_BIND_SERVICE. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields read as zero */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* Well-known PSMs 0x0001/0x0003 start at the SDP
		 * security level */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
974
/* Establish (or reuse) the ACL link to the destination and attach this
 * socket as a channel on it.  Called with the socket locked; returns 0
 * on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Map the socket's security level to an HCI auth requirement:
	 * raw sockets use dedicated bonding, PSM 0x0001 (SDP) never
	 * bonds, everything else uses general bonding. */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* ACL link already up: raw/dgram sockets are connected
		 * right away, others start the L2CAP connect procedure */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1065
/* connect(2) backend: validate the address and channel mode, record
 * the destination, kick off l2cap_do_connect() and wait for
 * BT_CONNECTED (subject to O_NONBLOCK / sndtimeo). */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Accept short sockaddrs; unset fields read as zero */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1142
/* listen(2) backend.  A socket bound without a PSM gets one
 * auto-assigned from the dynamic range (odd values 0x1001..0x10ff). */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;	/* dynamic range exhausted */
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1201
/* accept(2) backend: block (interruptibly, honoring O_NONBLOCK and
 * rcvtimeo) until an established child socket can be dequeued. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so children can be
		 * delivered to the accept queue */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1257
1258 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1259 {
1260 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1261 struct sock *sk = sock->sk;
1262
1263 BT_DBG("sock %p, sk %p", sock, sk);
1264
1265 addr->sa_family = AF_BLUETOOTH;
1266 *len = sizeof(struct sockaddr_l2);
1267
1268 if (peer) {
1269 la->l2_psm = l2cap_pi(sk)->psm;
1270 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1271 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1272 } else {
1273 la->l2_psm = l2cap_pi(sk)->sport;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1276 }
1277
1278 return 0;
1279 }
1280
/* Wait until every transmitted I-frame has been acknowledged by the
 * peer or the connection goes away.  Sleeps in HZ/5 slices, dropping
 * the socket lock while asleep; interruptible by signals and aborted
 * on a pending socket error.  Returns 0 or a negative errno. */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* re-arm the slice if the previous one fully expired */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1311
/* ERTM monitor timer callback (softirq context): the peer did not
 * answer our poll.  Disconnect once retry_count reaches the remote
 * MaxTransmit; otherwise poll again with an S-frame carrying the
 * P bit and re-arm the monitor timer. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1329
/* ERTM retransmission timer callback (softirq context): no ack arrived
 * in time.  Start the poll sequence: send an S-frame with the P bit,
 * mark that we now wait for the F bit, and arm the monitor timer. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1343
/* Free frames at the head of the TX queue that the peer has already
 * acknowledged, i.e. everything before expected_ack_seq.  Stops the
 * retransmission timer once no unacked frames remain. */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* first still-unacked frame: stop here */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1362
1363 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1364 {
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1366
1367 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1368
1369 hci_send_acl(pi->conn->hcon, skb, 0);
1370 }
1371
/* Streaming-mode transmit: stamp each queued PDU with the next TxSeq,
 * append the FCS if configured, send it and drop it from the queue
 * (streaming mode never retransmits, so frames are not kept).
 * NOTE(review): the skb_clone() result is dereferenced without a NULL
 * check — presumably relying on GFP_ATOMIC rarely failing; verify
 * against later upstream fixes. */
static int l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = sk->sk_send_head)) {
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything but its own trailing 2 bytes */
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(sk, tx_skb);

		/* 6-bit sequence numbers, modulo 64 */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);
	}
	return 0;
}
1404
/* Retransmit the single queued I-frame whose TxSeq equals @tx_seq
 * (used for SREJ handling).  Disconnects if that frame already reached
 * the peer's MaxTransmit limit.  The retransmitted clone carries the
 * current ReqSeq and, if pending, the F bit. */
static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* linear scan of the TX queue for the requested sequence number */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		/* tx_seq not found in the queue */
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		l2cap_send_disconn_req(pi->conn, sk);
		return;
	}

	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);

	/* piggy-back the F bit if a poll answer is outstanding */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1451
1452 static int l2cap_ertm_send(struct sock *sk)
1453 {
1454 struct sk_buff *skb, *tx_skb;
1455 struct l2cap_pinfo *pi = l2cap_pi(sk);
1456 u16 control, fcs;
1457 int nsent = 0;
1458
1459 if (sk->sk_state != BT_CONNECTED)
1460 return -ENOTCONN;
1461
1462 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1463
1464 if (pi->remote_max_tx &&
1465 bt_cb(skb)->retries == pi->remote_max_tx) {
1466 l2cap_send_disconn_req(pi->conn, sk);
1467 break;
1468 }
1469
1470 tx_skb = skb_clone(skb, GFP_ATOMIC);
1471
1472 bt_cb(skb)->retries++;
1473
1474 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1475 control &= L2CAP_CTRL_SAR;
1476
1477 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1478 control |= L2CAP_CTRL_FINAL;
1479 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1480 }
1481 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1482 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1483 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1484
1485
1486 if (pi->fcs == L2CAP_FCS_CRC16) {
1487 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1488 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1489 }
1490
1491 l2cap_do_send(sk, tx_skb);
1492
1493 __mod_retrans_timer();
1494
1495 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1496 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1497
1498 pi->unacked_frames++;
1499 pi->frames_sent++;
1500
1501 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1502 sk->sk_send_head = NULL;
1503 else
1504 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1505
1506 nsent++;
1507 }
1508
1509 return nsent;
1510 }
1511
/* Rewind the send pointer to the oldest unacked frame and retransmit
 * everything from there via l2cap_ertm_send(), under the send lock.
 * Returns l2cap_ertm_send()'s result (frames sent or -errno). */
static int l2cap_retransmit_frames(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int ret;

	spin_lock_bh(&pi->send_lock);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = TX_QUEUE(sk)->next;

	/* restart numbering from the last sequence the peer acked */
	pi->next_tx_seq = pi->expected_ack_seq;
	ret = l2cap_ertm_send(sk);

	spin_unlock_bh(&pi->send_lock);

	return ret;
}
1529
/* Acknowledge received I-frames.  If we are locally busy, send RNR.
 * Otherwise try to piggy-back the ack on pending outgoing I-frames;
 * only if nothing was sent do we emit an explicit RR S-frame. */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;
	int nframes;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	spin_lock_bh(&pi->send_lock);
	nframes = l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* I-frames went out carrying the ReqSeq; no S-frame needed */
	if (nframes > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1555
/* Send a SREJ S-frame with the F bit set, requesting retransmission of
 * the last (tail) entry on this channel's SREJ list. */
static void l2cap_send_srejtail(struct sock *sk)
{
	struct srej_list *tail;
	u16 control;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
}
1569
/* Copy @len bytes of user data from @msg into @skb: @count bytes go
 * into the main buffer (already sized by the caller), the remainder
 * into MTU-sized fragments chained on frag_list.  Returns the number
 * of bytes copied or -EFAULT.
 * NOTE(review): allocation failure also returns -EFAULT rather than
 * the err from bt_skb_send_alloc() — verify intent against callers. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return -EFAULT;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1601
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a 2-byte
 * PSM field, followed by @len bytes of user data (fragmented to the
 * connection MTU).  Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1630
/* Build a basic-mode PDU: plain L2CAP header followed by @len bytes of
 * user data (fragmented to the connection MTU).  Returns the skb or
 * an ERR_PTR. */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1658
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field (@control), optional 2-byte SAR SDU length (@sdulen != 0 for
 * the first segment), payload, and room for a 2-byte FCS when CRC16 is
 * configured (value stamped later at send time).  Returns the skb or
 * an ERR_PTR. */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* reserve the FCS slot; the real value is filled in on transmit */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1703
/* Segment an SDU larger than the remote MPS into a START / CONTINUE* /
 * END sequence of I-frame PDUs, building them on a private queue and
 * splicing onto the TX queue only when all segments were created (so a
 * mid-way allocation failure leaves the TX queue untouched).
 * Returns total bytes queued or a negative errno. */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* first segment carries the total SDU length (sdulen arg) */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	spin_lock_bh(&pi->send_lock);
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;
	spin_unlock_bh(&pi->send_lock);

	return size;
}
1751
1752 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1753 {
1754 struct sock *sk = sock->sk;
1755 struct l2cap_pinfo *pi = l2cap_pi(sk);
1756 struct sk_buff *skb;
1757 u16 control;
1758 int err;
1759
1760 BT_DBG("sock %p, sk %p", sock, sk);
1761
1762 err = sock_error(sk);
1763 if (err)
1764 return err;
1765
1766 if (msg->msg_flags & MSG_OOB)
1767 return -EOPNOTSUPP;
1768
1769 lock_sock(sk);
1770
1771 if (sk->sk_state != BT_CONNECTED) {
1772 err = -ENOTCONN;
1773 goto done;
1774 }
1775
1776 /* Connectionless channel */
1777 if (sk->sk_type == SOCK_DGRAM) {
1778 skb = l2cap_create_connless_pdu(sk, msg, len);
1779 if (IS_ERR(skb)) {
1780 err = PTR_ERR(skb);
1781 } else {
1782 l2cap_do_send(sk, skb);
1783 err = len;
1784 }
1785 goto done;
1786 }
1787
1788 switch (pi->mode) {
1789 case L2CAP_MODE_BASIC:
1790 /* Check outgoing MTU */
1791 if (len > pi->omtu) {
1792 err = -EINVAL;
1793 goto done;
1794 }
1795
1796 /* Create a basic PDU */
1797 skb = l2cap_create_basic_pdu(sk, msg, len);
1798 if (IS_ERR(skb)) {
1799 err = PTR_ERR(skb);
1800 goto done;
1801 }
1802
1803 l2cap_do_send(sk, skb);
1804 err = len;
1805 break;
1806
1807 case L2CAP_MODE_ERTM:
1808 case L2CAP_MODE_STREAMING:
1809 /* Entire SDU fits into one PDU */
1810 if (len <= pi->remote_mps) {
1811 control = L2CAP_SDU_UNSEGMENTED;
1812 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1813 if (IS_ERR(skb)) {
1814 err = PTR_ERR(skb);
1815 goto done;
1816 }
1817 __skb_queue_tail(TX_QUEUE(sk), skb);
1818
1819 if (pi->mode == L2CAP_MODE_ERTM)
1820 spin_lock_bh(&pi->send_lock);
1821
1822 if (sk->sk_send_head == NULL)
1823 sk->sk_send_head = skb;
1824
1825 if (pi->mode == L2CAP_MODE_ERTM)
1826 spin_unlock_bh(&pi->send_lock);
1827 } else {
1828 /* Segment SDU into multiples PDUs */
1829 err = l2cap_sar_segment_sdu(sk, msg, len);
1830 if (err < 0)
1831 goto done;
1832 }
1833
1834 if (pi->mode == L2CAP_MODE_STREAMING) {
1835 err = l2cap_streaming_send(sk);
1836 } else {
1837 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1838 pi->conn_state && L2CAP_CONN_WAIT_F) {
1839 err = len;
1840 break;
1841 }
1842 spin_lock_bh(&pi->send_lock);
1843 err = l2cap_ertm_send(sk);
1844 spin_unlock_bh(&pi->send_lock);
1845 }
1846
1847 if (err >= 0)
1848 err = len;
1849 break;
1850
1851 default:
1852 BT_DBG("bad state %1.1x", pi->mode);
1853 err = -EINVAL;
1854 }
1855
1856 done:
1857 release_sock(sk);
1858 return err;
1859 }
1860
/* recvmsg() for L2CAP sockets.  With deferred setup, the first read on
 * a BT_CONNECT2 socket completes the handshake: send the pending
 * connect response and move to BT_CONFIG, returning 0 (no data).
 * Otherwise defer to the generic Bluetooth receive path. */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1887
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (mtu/mode/fcs/tx window,
 * tx window validated against L2CAP_DEFAULT_TX_WINDOW) and L2CAP_LM
 * (link-mode flags mapped onto sec_level/role_switch/force_reliable).
 * NOTE(review): when opts.mode is invalid, err is set but the inner
 * `break` only exits the mode switch — the remaining options are still
 * applied; confirm whether that partial update is intended. */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* seed with current values so a short copy keeps the rest */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* tx_win is stored in a __u8; reject oversized windows */
		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* highest requested mode wins */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1966
/* setsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY (connection-oriented or raw
 * sockets only) and BT_DEFER_SETUP (only before the socket is live). */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
				sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* only meaningful before connect/accept */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2031
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM (sec_level
 * mapped back to link-mode bits) and L2CAP_CONNINFO (only valid once
 * connected, or in deferred-setup BT_CONNECT2).
 * NOTE(review): opts/cinfo are stack structs copied to userspace
 * without a preceding memset — any compiler padding would leak; verify
 * the struct layouts or zero them first. */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		/* translate sec_level back into cumulative LM flags */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2115
/* getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY and BT_DEFER_SETUP, mirroring the
 * restrictions enforced on the set side. */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2170
/* shutdown() for L2CAP sockets.  For ERTM channels, first wait for all
 * outstanding I-frames to be acked, then close the channel; with
 * SO_LINGER set, additionally wait for BT_CLOSED up to the linger
 * time.  Idempotent: a second call finds sk_shutdown already set. */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
2197
/* release() for L2CAP sockets: shut the channel down in both
 * directions, detach the sock from the socket and let
 * l2cap_sock_kill() drop the final reference. */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2214
2215 static void l2cap_chan_ready(struct sock *sk)
2216 {
2217 struct sock *parent = bt_sk(sk)->parent;
2218
2219 BT_DBG("sk %p, parent %p", sk, parent);
2220
2221 l2cap_pi(sk)->conf_state = 0;
2222 l2cap_sock_clear_timer(sk);
2223
2224 if (!parent) {
2225 /* Outgoing channel.
2226 * Wake up socket sleeping on connect.
2227 */
2228 sk->sk_state = BT_CONNECTED;
2229 sk->sk_state_change(sk);
2230 } else {
2231 /* Incoming channel.
2232 * Wake up socket sleeping on accept.
2233 */
2234 parent->sk_data_ready(parent, 0);
2235 }
2236 }
2237
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* best-effort: skip sockets we can't clone for */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* drop the clone if the socket's receive queue is full */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2264
/* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID 0x0001),
 * command header (@code, @ident, @dlen) and @dlen bytes of @data,
 * fragmented to the connection MTU.  Returns NULL on allocation
 * failure (all partial fragments freed). */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* first fragment: whatever payload fits after the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* frees the head skb and every fragment chained so far */
	kfree_skb(skb);
	return NULL;
}
2324
/* Decode one configuration option at *@ptr: fill in @type, @olen and
 * @val (inline value for lengths 1/2/4, pointer to the raw bytes
 * otherwise), advance *@ptr past the option and return its total size.
 * NOTE(review): opt->len is taken from the wire without checking it
 * against the remaining buffer — the caller's `len >=
 * L2CAP_CONF_OPT_SIZE` loop bound does not cover the value bytes;
 * verify against later hardening fixes. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2357
/* Append one configuration option (type/len/value) at *@ptr in wire
 * format — little-endian for lengths 2 and 4, raw copy otherwise (in
 * which case @val is a pointer) — and advance *@ptr past it. */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		/* variable-length payload: val carries a pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2387
/* Ack timer callback (softirq context): time to acknowledge received
 * I-frames that were not yet acked by outgoing traffic. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2396
/* Initialise the per-channel ERTM state: sequence counters, the three
 * ERTM timers (retransmission, monitor, ack), the SREJ and busy
 * queues, the send lock, and the local-busy work item. */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));
	spin_lock_init(&l2cap_pi(sk)->send_lock);

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
2418
2419 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2420 {
2421 u32 local_feat_mask = l2cap_feat_mask;
2422 if (enable_ertm)
2423 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2424
2425 switch (mode) {
2426 case L2CAP_MODE_ERTM:
2427 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2428 case L2CAP_MODE_STREAMING:
2429 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2430 default:
2431 return 0x00;
2432 }
2433 }
2434
2435 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2436 {
2437 switch (mode) {
2438 case L2CAP_MODE_STREAMING:
2439 case L2CAP_MODE_ERTM:
2440 if (l2cap_mode_supported(mode, remote_feat_mask))
2441 return mode;
2442 /* fall through */
2443 default:
2444 return L2CAP_MODE_BASIC;
2445 }
2446 }
2447
/* Build a configuration request for this channel into @data.  On the
 * first request, pin or downgrade the mode depending on the remote
 * feature mask; then emit the MTU option (basic) or the RFC option
 * plus an optional FCS=none option (ERTM/streaming).  Returns the
 * number of bytes written.
 * NOTE(review): the ERTM and streaming branches below duplicate the
 * max_pdu_size clamping and FCS logic — a candidate for a shared
 * helper. */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* mode negotiation only happens on the very first config round */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* user explicitly asked for this mode: all-or-nothing */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* clamp the PDU size to what fits in one ACL MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2535
2536 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2537 {
2538 struct l2cap_pinfo *pi = l2cap_pi(sk);
2539 struct l2cap_conf_rsp *rsp = data;
2540 void *ptr = rsp->data;
2541 void *req = pi->conf_req;
2542 int len = pi->conf_len;
2543 int type, hint, olen;
2544 unsigned long val;
2545 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2546 u16 mtu = L2CAP_DEFAULT_MTU;
2547 u16 result = L2CAP_CONF_SUCCESS;
2548
2549 BT_DBG("sk %p", sk);
2550
2551 while (len >= L2CAP_CONF_OPT_SIZE) {
2552 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2553
2554 hint = type & L2CAP_CONF_HINT;
2555 type &= L2CAP_CONF_MASK;
2556
2557 switch (type) {
2558 case L2CAP_CONF_MTU:
2559 mtu = val;
2560 break;
2561
2562 case L2CAP_CONF_FLUSH_TO:
2563 pi->flush_to = val;
2564 break;
2565
2566 case L2CAP_CONF_QOS:
2567 break;
2568
2569 case L2CAP_CONF_RFC:
2570 if (olen == sizeof(rfc))
2571 memcpy(&rfc, (void *) val, olen);
2572 break;
2573
2574 case L2CAP_CONF_FCS:
2575 if (val == L2CAP_FCS_NONE)
2576 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2577
2578 break;
2579
2580 default:
2581 if (hint)
2582 break;
2583
2584 result = L2CAP_CONF_UNKNOWN;
2585 *((u8 *) ptr++) = type;
2586 break;
2587 }
2588 }
2589
2590 if (pi->num_conf_rsp || pi->num_conf_req)
2591 goto done;
2592
2593 switch (pi->mode) {
2594 case L2CAP_MODE_STREAMING:
2595 case L2CAP_MODE_ERTM:
2596 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2597 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2598 return -ECONNREFUSED;
2599 break;
2600 default:
2601 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2602 break;
2603 }
2604
2605 done:
2606 if (pi->mode != rfc.mode) {
2607 result = L2CAP_CONF_UNACCEPT;
2608 rfc.mode = pi->mode;
2609
2610 if (pi->num_conf_rsp == 1)
2611 return -ECONNREFUSED;
2612
2613 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2614 sizeof(rfc), (unsigned long) &rfc);
2615 }
2616
2617
2618 if (result == L2CAP_CONF_SUCCESS) {
2619 /* Configure output options and let the other side know
2620 * which ones we don't like. */
2621
2622 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2623 result = L2CAP_CONF_UNACCEPT;
2624 else {
2625 pi->omtu = mtu;
2626 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2627 }
2628 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2629
2630 switch (rfc.mode) {
2631 case L2CAP_MODE_BASIC:
2632 pi->fcs = L2CAP_FCS_NONE;
2633 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2634 break;
2635
2636 case L2CAP_MODE_ERTM:
2637 pi->remote_tx_win = rfc.txwin_size;
2638 pi->remote_max_tx = rfc.max_transmit;
2639 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2640 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2641
2642 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2643
2644 rfc.retrans_timeout =
2645 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2646 rfc.monitor_timeout =
2647 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2648
2649 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2650
2651 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2652 sizeof(rfc), (unsigned long) &rfc);
2653
2654 break;
2655
2656 case L2CAP_MODE_STREAMING:
2657 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2658 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2659
2660 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2661
2662 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2663
2664 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2665 sizeof(rfc), (unsigned long) &rfc);
2666
2667 break;
2668
2669 default:
2670 result = L2CAP_CONF_UNACCEPT;
2671
2672 memset(&rfc, 0, sizeof(rfc));
2673 rfc.mode = pi->mode;
2674 }
2675
2676 if (result == L2CAP_CONF_SUCCESS)
2677 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2678 }
2679 rsp->scid = cpu_to_le16(pi->dcid);
2680 rsp->result = cpu_to_le16(result);
2681 rsp->flags = cpu_to_le16(0x0000);
2682
2683 return ptr - data;
2684 }
2685
2686 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2687 {
2688 struct l2cap_pinfo *pi = l2cap_pi(sk);
2689 struct l2cap_conf_req *req = data;
2690 void *ptr = req->data;
2691 int type, olen;
2692 unsigned long val;
2693 struct l2cap_conf_rfc rfc;
2694
2695 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2696
2697 while (len >= L2CAP_CONF_OPT_SIZE) {
2698 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2699
2700 switch (type) {
2701 case L2CAP_CONF_MTU:
2702 if (val < L2CAP_DEFAULT_MIN_MTU) {
2703 *result = L2CAP_CONF_UNACCEPT;
2704 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2705 } else
2706 pi->omtu = val;
2707 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2708 break;
2709
2710 case L2CAP_CONF_FLUSH_TO:
2711 pi->flush_to = val;
2712 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2713 2, pi->flush_to);
2714 break;
2715
2716 case L2CAP_CONF_RFC:
2717 if (olen == sizeof(rfc))
2718 memcpy(&rfc, (void *)val, olen);
2719
2720 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2721 rfc.mode != pi->mode)
2722 return -ECONNREFUSED;
2723
2724 pi->mode = rfc.mode;
2725 pi->fcs = 0;
2726
2727 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2728 sizeof(rfc), (unsigned long) &rfc);
2729 break;
2730 }
2731 }
2732
2733 if (*result == L2CAP_CONF_SUCCESS) {
2734 switch (rfc.mode) {
2735 case L2CAP_MODE_ERTM:
2736 pi->remote_tx_win = rfc.txwin_size;
2737 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2738 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2739 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2740 break;
2741 case L2CAP_MODE_STREAMING:
2742 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2743 }
2744 }
2745
2746 req->dcid = cpu_to_le16(pi->dcid);
2747 req->flags = cpu_to_le16(0x0000);
2748
2749 return ptr - data;
2750 }
2751
2752 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2753 {
2754 struct l2cap_conf_rsp *rsp = data;
2755 void *ptr = rsp->data;
2756
2757 BT_DBG("sk %p", sk);
2758
2759 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2760 rsp->result = cpu_to_le16(result);
2761 rsp->flags = cpu_to_le16(flags);
2762
2763 return ptr - data;
2764 }
2765
2766 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2767 {
2768 struct l2cap_pinfo *pi = l2cap_pi(sk);
2769 int type, olen;
2770 unsigned long val;
2771 struct l2cap_conf_rfc rfc;
2772
2773 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2774
2775 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2776 return;
2777
2778 while (len >= L2CAP_CONF_OPT_SIZE) {
2779 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2780
2781 switch (type) {
2782 case L2CAP_CONF_RFC:
2783 if (olen == sizeof(rfc))
2784 memcpy(&rfc, (void *)val, olen);
2785 goto done;
2786 }
2787 }
2788
2789 done:
2790 switch (rfc.mode) {
2791 case L2CAP_MODE_ERTM:
2792 pi->remote_tx_win = rfc.txwin_size;
2793 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2794 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2795 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2796 break;
2797 case L2CAP_MODE_STREAMING:
2798 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2799 }
2800 }
2801
2802 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2803 {
2804 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2805
2806 if (rej->reason != 0x0000)
2807 return 0;
2808
2809 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2810 cmd->ident == conn->info_ident) {
2811 del_timer(&conn->info_timer);
2812
2813 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2814 conn->info_ident = 0;
2815
2816 l2cap_conn_start(conn);
2817 }
2818
2819 return 0;
2820 }
2821
/* Handle an incoming L2CAP Connection Request: find a listening socket
 * for the PSM, check link security and backlog, allocate and register a
 * child socket, and always send a Connection Response (success, pending,
 * or an error result).  May also kick off a feature-mask information
 * exchange on the first connection.  Returns 0. */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	/* NOTE(review): l2cap_get_sock_by_psm appears to return with the
	 * parent socket bh-locked — the "response" path unlocks it. */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child from the listening parent and record the
	 * peer's source CID as our destination CID */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm  = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must accept() before we reply */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to finish */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer "pending" and probe */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* First connection on this link: ask the peer for its
		 * feature mask before completing the setup */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2938
/* Handle an incoming L2CAP Connection Response for one of our pending
 * outgoing connections.  On success, move to BT_CONFIG and send the
 * first configure request; on "pending", just mark the channel; any
 * other result tears the channel down.  Returns 0. */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Look up by our source CID if echoed, otherwise by the request
	 * identifier (scid may be 0 on a pending/failed response).
	 * NOTE(review): both lookup helpers appear to return the socket
	 * bh-locked — the function ends with bh_unlock_sock(sk). */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Kick off configuration straight away */
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2988
/* Handle an incoming L2CAP Configure Request.  Option data may arrive
 * fragmented over several requests (continuation flag 0x0001); fragments
 * are accumulated in pi->conf_req and only parsed once complete.  When
 * both directions are configured, the channel goes to BT_CONNECTED.
 * Returns 0, or -ENOENT if the target channel does not exist. */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* NOTE(review): lookup appears to return the socket bh-locked;
	 * all exits go through "unlock". */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unacceptable/unsupported mode: give up on the channel */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: enable CRC16 FCS unless the
		 * peer asked for none, then bring the channel up */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not sent our own config request yet: do so now */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3074
3075 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3076 {
3077 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3078 u16 scid, flags, result;
3079 struct sock *sk;
3080 int len = cmd->len - sizeof(*rsp);
3081
3082 scid = __le16_to_cpu(rsp->scid);
3083 flags = __le16_to_cpu(rsp->flags);
3084 result = __le16_to_cpu(rsp->result);
3085
3086 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3087 scid, flags, result);
3088
3089 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3090 if (!sk)
3091 return 0;
3092
3093 switch (result) {
3094 case L2CAP_CONF_SUCCESS:
3095 l2cap_conf_rfc_get(sk, rsp->data, len);
3096 break;
3097
3098 case L2CAP_CONF_UNACCEPT:
3099 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3100 char req[64];
3101
3102 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3103 l2cap_send_disconn_req(conn, sk);
3104 goto done;
3105 }
3106
3107 /* throw out any old stored conf requests */
3108 result = L2CAP_CONF_SUCCESS;
3109 len = l2cap_parse_conf_rsp(sk, rsp->data,
3110 len, req, &result);
3111 if (len < 0) {
3112 l2cap_send_disconn_req(conn, sk);
3113 goto done;
3114 }
3115
3116 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3117 L2CAP_CONF_REQ, len, req);
3118 l2cap_pi(sk)->num_conf_req++;
3119 if (result != L2CAP_CONF_SUCCESS)
3120 goto done;
3121 break;
3122 }
3123
3124 default:
3125 sk->sk_err = ECONNRESET;
3126 l2cap_sock_set_timer(sk, HZ * 5);
3127 l2cap_send_disconn_req(conn, sk);
3128 goto done;
3129 }
3130
3131 if (flags & 0x01)
3132 goto done;
3133
3134 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3135
3136 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3137 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3138 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3139 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3140
3141 sk->sk_state = BT_CONNECTED;
3142 l2cap_pi(sk)->next_tx_seq = 0;
3143 l2cap_pi(sk)->expected_tx_seq = 0;
3144 __skb_queue_head_init(TX_QUEUE(sk));
3145 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3146 l2cap_ertm_init(sk);
3147
3148 l2cap_chan_ready(sk);
3149 }
3150
3151 done:
3152 bh_unlock_sock(sk);
3153 return 0;
3154 }
3155
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response, then shut down and destroy the channel.
 * Returns 0. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid.  NOTE(review): lookup appears to
	 * return the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo our CID pair back in the response */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	/* Free the socket if it is already zapped/orphaned */
	l2cap_sock_kill(sk);
	return 0;
}
3184
/* Handle an incoming L2CAP Disconnection Response to a disconnect we
 * initiated: remove the channel and destroy the socket if it is
 * already orphaned.  Returns 0. */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* NOTE(review): lookup appears to return the socket bh-locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* 0: normal close, no error reported to the user */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3206
3207 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3208 {
3209 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3210 u16 type;
3211
3212 type = __le16_to_cpu(req->type);
3213
3214 BT_DBG("type 0x%4.4x", type);
3215
3216 if (type == L2CAP_IT_FEAT_MASK) {
3217 u8 buf[8];
3218 u32 feat_mask = l2cap_feat_mask;
3219 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3220 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3221 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3222 if (enable_ertm)
3223 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3224 | L2CAP_FEAT_FCS;
3225 put_unaligned_le32(feat_mask, rsp->data);
3226 l2cap_send_cmd(conn, cmd->ident,
3227 L2CAP_INFO_RSP, sizeof(buf), buf);
3228 } else if (type == L2CAP_IT_FIXED_CHAN) {
3229 u8 buf[12];
3230 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3231 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3232 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3233 memcpy(buf + 4, l2cap_fixed_chan, 8);
3234 l2cap_send_cmd(conn, cmd->ident,
3235 L2CAP_INFO_RSP, sizeof(buf), buf);
3236 } else {
3237 struct l2cap_info_rsp rsp;
3238 rsp.type = cpu_to_le16(type);
3239 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3240 l2cap_send_cmd(conn, cmd->ident,
3241 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3242 }
3243
3244 return 0;
3245 }
3246
3247 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3248 {
3249 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3250 u16 type, result;
3251
3252 type = __le16_to_cpu(rsp->type);
3253 result = __le16_to_cpu(rsp->result);
3254
3255 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3256
3257 del_timer(&conn->info_timer);
3258
3259 if (type == L2CAP_IT_FEAT_MASK) {
3260 conn->feat_mask = get_unaligned_le32(rsp->data);
3261
3262 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3263 struct l2cap_info_req req;
3264 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3265
3266 conn->info_ident = l2cap_get_ident(conn);
3267
3268 l2cap_send_cmd(conn, conn->info_ident,
3269 L2CAP_INFO_REQ, sizeof(req), &req);
3270 } else {
3271 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3272 conn->info_ident = 0;
3273
3274 l2cap_conn_start(conn);
3275 }
3276 } else if (type == L2CAP_IT_FIXED_CHAN) {
3277 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3278 conn->info_ident = 0;
3279
3280 l2cap_conn_start(conn);
3281 }
3282
3283 return 0;
3284 }
3285
/* Process an skb received on the L2CAP signaling channel.  The payload
 * may contain several concatenated commands; each is dispatched to its
 * handler and any handler error is answered with a Command Reject.
 * Consumes (frees) the skb. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command longer than the remaining payload, or with a
		 * zero identifier, is malformed: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3375
3376 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3377 {
3378 u16 our_fcs, rcv_fcs;
3379 int hdr_size = L2CAP_HDR_SIZE + 2;
3380
3381 if (pi->fcs == L2CAP_FCS_CRC16) {
3382 skb_trim(skb, skb->len - 2);
3383 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3384 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3385
3386 if (our_fcs != rcv_fcs)
3387 return -EINVAL;
3388 }
3389 return 0;
3390 }
3391
/* Answer a poll (P-bit) from the remote: send RNR if we are locally
 * busy, otherwise flush pending I-frames, and fall back to an RR if
 * nothing at all was transmitted (the peer expects some F-bit reply). */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer we cannot receive right now */
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	/* Transmit any queued I-frames (counts into pi->frames_sent) */
	spin_lock_bh(&pi->send_lock);
	l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		/* Nothing else went out: acknowledge with a plain RR */
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3421
3422 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3423 {
3424 struct sk_buff *next_skb;
3425 struct l2cap_pinfo *pi = l2cap_pi(sk);
3426 int tx_seq_offset, next_tx_seq_offset;
3427
3428 bt_cb(skb)->tx_seq = tx_seq;
3429 bt_cb(skb)->sar = sar;
3430
3431 next_skb = skb_peek(SREJ_QUEUE(sk));
3432 if (!next_skb) {
3433 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3434 return 0;
3435 }
3436
3437 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3438 if (tx_seq_offset < 0)
3439 tx_seq_offset += 64;
3440
3441 do {
3442 if (bt_cb(next_skb)->tx_seq == tx_seq)
3443 return -EINVAL;
3444
3445 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3446 pi->buffer_seq) % 64;
3447 if (next_tx_seq_offset < 0)
3448 next_tx_seq_offset += 64;
3449
3450 if (next_tx_seq_offset > tx_seq_offset) {
3451 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3452 return 0;
3453 }
3454
3455 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3456 break;
3457
3458 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3459
3460 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3461
3462 return 0;
3463 }
3464
/* Reassemble an ERTM SDU from incoming I-frames according to the SAR
 * bits in @control, delivering complete SDUs to the socket receive
 * queue.  Returns 0 (consuming @skb) or a negative error (-ENOMEM /
 * sock_queue_rcv_skb error) leaving retry state set.  Protocol
 * violations (unexpected SAR state, oversized SDU) tear the channel
 * down via a disconnect request. */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame while a segmented SDU is in
		 * progress is a protocol violation */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry (previous END failed to queue) the payload
		 * was already appended: do not append it twice */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	/* Discard the partial SDU, then (fallthrough) disconnect */
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk);
	kfree_skb(skb);
	return 0;
}
3572
/* Deferred work run while the channel is in the local-busy state: keep
 * retrying to push frames parked on BUSY_QUEUE into the receive path,
 * sleeping between attempts, until the queue drains or the retry limit
 * is hit (then the channel is disconnected).  On success, clears the
 * busy condition and sends an RR with the poll bit to resume the peer. */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			/* Receiver stayed stuck too long: give up */
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* Sleep with the socket released so the reader can drain */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		/* Try to flush the parked frames in order; stop on the
		 * first one that still cannot be delivered */
		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		if (!skb)
			break;	/* queue fully drained */
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it back to life */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3648
/* Push a received ERTM I-frame towards reassembly.  If the channel is
 * (or becomes) locally busy, the frame is parked on BUSY_QUEUE, an RNR
 * is sent to the peer and the busy worker is scheduled to drain the
 * backlog later.  Returns the reassembly result, or -EBUSY when the
 * frame was queued because the channel was already busy. */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: just park the frame for the worker */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending until we recover */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3683
3684 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3685 {
3686 struct l2cap_pinfo *pi = l2cap_pi(sk);
3687 struct sk_buff *_skb;
3688 int err = -EINVAL;
3689
3690 /*
3691 * TODO: We have to notify the userland if some data is lost with the
3692 * Streaming Mode.
3693 */
3694
3695 switch (control & L2CAP_CTRL_SAR) {
3696 case L2CAP_SDU_UNSEGMENTED:
3697 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3698 kfree_skb(pi->sdu);
3699 break;
3700 }
3701
3702 err = sock_queue_rcv_skb(sk, skb);
3703 if (!err)
3704 return 0;
3705
3706 break;
3707
3708 case L2CAP_SDU_START:
3709 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3710 kfree_skb(pi->sdu);
3711 break;
3712 }
3713
3714 pi->sdu_len = get_unaligned_le16(skb->data);
3715 skb_pull(skb, 2);
3716
3717 if (pi->sdu_len > pi->imtu) {
3718 err = -EMSGSIZE;
3719 break;
3720 }
3721
3722 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3723 if (!pi->sdu) {
3724 err = -ENOMEM;
3725 break;
3726 }
3727
3728 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3729
3730 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3731 pi->partial_sdu_len = skb->len;
3732 err = 0;
3733 break;
3734
3735 case L2CAP_SDU_CONTINUE:
3736 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3737 break;
3738
3739 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3740
3741 pi->partial_sdu_len += skb->len;
3742 if (pi->partial_sdu_len > pi->sdu_len)
3743 kfree_skb(pi->sdu);
3744 else
3745 err = 0;
3746
3747 break;
3748
3749 case L2CAP_SDU_END:
3750 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3751 break;
3752
3753 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3754
3755 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3756 pi->partial_sdu_len += skb->len;
3757
3758 if (pi->partial_sdu_len > pi->imtu)
3759 goto drop;
3760
3761 if (pi->partial_sdu_len == pi->sdu_len) {
3762 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3763 err = sock_queue_rcv_skb(sk, _skb);
3764 if (err < 0)
3765 kfree_skb(_skb);
3766 }
3767 err = 0;
3768
3769 drop:
3770 kfree_skb(pi->sdu);
3771 break;
3772 }
3773
3774 kfree_skb(skb);
3775 return err;
3776 }
3777
3778 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3779 {
3780 struct sk_buff *skb;
3781 u16 control;
3782
3783 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3784 if (bt_cb(skb)->tx_seq != tx_seq)
3785 break;
3786
3787 skb = skb_dequeue(SREJ_QUEUE(sk));
3788 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3789 l2cap_ertm_reassembly_sdu(sk, skb, control);
3790 l2cap_pi(sk)->buffer_seq_srej =
3791 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3792 tx_seq = (tx_seq + 1) % 64;
3793 }
3794 }
3795
3796 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3797 {
3798 struct l2cap_pinfo *pi = l2cap_pi(sk);
3799 struct srej_list *l, *tmp;
3800 u16 control;
3801
3802 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3803 if (l->tx_seq == tx_seq) {
3804 list_del(&l->list);
3805 kfree(l);
3806 return;
3807 }
3808 control = L2CAP_SUPER_SELECT_REJECT;
3809 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3810 l2cap_send_sframe(pi, control);
3811 list_del(&l->list);
3812 list_add_tail(&l->list, SREJ_LIST(sk));
3813 }
3814 }
3815
3816 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3817 {
3818 struct l2cap_pinfo *pi = l2cap_pi(sk);
3819 struct srej_list *new;
3820 u16 control;
3821
3822 while (tx_seq != pi->expected_tx_seq) {
3823 control = L2CAP_SUPER_SELECT_REJECT;
3824 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3825 l2cap_send_sframe(pi, control);
3826
3827 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3828 new->tx_seq = pi->expected_tx_seq;
3829 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3830 list_add_tail(&new->list, SREJ_LIST(sk));
3831 }
3832 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3833 }
3834
3835 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3836 {
3837 struct l2cap_pinfo *pi = l2cap_pi(sk);
3838 u8 tx_seq = __get_txseq(rx_control);
3839 u8 req_seq = __get_reqseq(rx_control);
3840 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3841 int tx_seq_offset, expected_tx_seq_offset;
3842 int num_to_ack = (pi->tx_win/6) + 1;
3843 int err = 0;
3844
3845 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3846
3847 if (L2CAP_CTRL_FINAL & rx_control &&
3848 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3849 del_timer(&pi->monitor_timer);
3850 if (pi->unacked_frames > 0)
3851 __mod_retrans_timer();
3852 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3853 }
3854
3855 pi->expected_ack_seq = req_seq;
3856 l2cap_drop_acked_frames(sk);
3857
3858 if (tx_seq == pi->expected_tx_seq)
3859 goto expected;
3860
3861 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3862 if (tx_seq_offset < 0)
3863 tx_seq_offset += 64;
3864
3865 /* invalid tx_seq */
3866 if (tx_seq_offset >= pi->tx_win) {
3867 l2cap_send_disconn_req(pi->conn, sk);
3868 goto drop;
3869 }
3870
3871 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3872 goto drop;
3873
3874 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3875 struct srej_list *first;
3876
3877 first = list_first_entry(SREJ_LIST(sk),
3878 struct srej_list, list);
3879 if (tx_seq == first->tx_seq) {
3880 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3881 l2cap_check_srej_gap(sk, tx_seq);
3882
3883 list_del(&first->list);
3884 kfree(first);
3885
3886 if (list_empty(SREJ_LIST(sk))) {
3887 pi->buffer_seq = pi->buffer_seq_srej;
3888 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3889 l2cap_send_ack(pi);
3890 }
3891 } else {
3892 struct srej_list *l;
3893
3894 /* duplicated tx_seq */
3895 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3896 goto drop;
3897
3898 list_for_each_entry(l, SREJ_LIST(sk), list) {
3899 if (l->tx_seq == tx_seq) {
3900 l2cap_resend_srejframe(sk, tx_seq);
3901 return 0;
3902 }
3903 }
3904 l2cap_send_srejframe(sk, tx_seq);
3905 }
3906 } else {
3907 expected_tx_seq_offset =
3908 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3909 if (expected_tx_seq_offset < 0)
3910 expected_tx_seq_offset += 64;
3911
3912 /* duplicated tx_seq */
3913 if (tx_seq_offset < expected_tx_seq_offset)
3914 goto drop;
3915
3916 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3917
3918 INIT_LIST_HEAD(SREJ_LIST(sk));
3919 pi->buffer_seq_srej = pi->buffer_seq;
3920
3921 __skb_queue_head_init(SREJ_QUEUE(sk));
3922 __skb_queue_head_init(BUSY_QUEUE(sk));
3923 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3924
3925 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3926
3927 l2cap_send_srejframe(sk, tx_seq);
3928
3929 del_timer(&pi->ack_timer);
3930 }
3931 return 0;
3932
3933 expected:
3934 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3935
3936 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3937 bt_cb(skb)->tx_seq = tx_seq;
3938 bt_cb(skb)->sar = sar;
3939 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3940 return 0;
3941 }
3942
3943 err = l2cap_push_rx_skb(sk, skb, rx_control);
3944 if (err < 0)
3945 return 0;
3946
3947 if (rx_control & L2CAP_CTRL_FINAL) {
3948 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3949 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3950 else
3951 l2cap_retransmit_frames(sk);
3952 }
3953
3954 __mod_ack_timer();
3955
3956 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3957 if (pi->num_acked == num_to_ack - 1)
3958 l2cap_send_ack(pi);
3959
3960 return 0;
3961
3962 drop:
3963 kfree_skb(skb);
3964 return 0;
3965 }
3966
/* Handle a Receiver Ready (RR) S-frame: acknowledge frames up to
 * req_seq, then — depending on the P/F bits — answer a poll from the
 * peer, complete our own poll/final exchange, or resume transmission. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: the reply must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Final response to our earlier poll; if we already acted
		 * on a REJ, do not retransmit a second time. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			/* send_lock serializes against the sendmsg path. */
			spin_lock_bh(&pi->send_lock);
			l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}
	}
}
4010
/* Handle a Reject (REJ) S-frame: the peer missed a frame.  Drop the
 * frames it did acknowledge and retransmit from req_seq onwards. */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* REJ with F-bit answers our poll; if we already acted on a
		 * previous REJ (REJ_ACT), don't retransmit again. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember we acted, so the F-bit reply isn't doubled. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Select Reject (SREJ) S-frame: the peer asks for one missing
 * I-frame (tx_seq) to be retransmitted; the P/F bits decide whether we
 * also acknowledge, resume sending, or remember the poll. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		/* send_lock serializes against the sendmsg path. */
		spin_lock_bh(&pi->send_lock);
		l2cap_ertm_send(sk);
		spin_unlock_bh(&pi->send_lock);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if we already honoured this exact
		 * SREJ while waiting for the final. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4069
/* Handle a Receiver Not Ready (RNR) S-frame: the peer cannot accept
 * more I-frames.  Mark the remote side busy, drop the frames it has
 * acknowledged, and answer any poll appropriately. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Nothing to retransmit while the peer is busy. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: keep asking for the missing frames. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4094
4095 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4096 {
4097 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4098
4099 if (L2CAP_CTRL_FINAL & rx_control &&
4100 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4101 del_timer(&l2cap_pi(sk)->monitor_timer);
4102 if (l2cap_pi(sk)->unacked_frames > 0)
4103 __mod_retrans_timer();
4104 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4105 }
4106
4107 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4108 case L2CAP_SUPER_RCV_READY:
4109 l2cap_data_channel_rrframe(sk, rx_control);
4110 break;
4111
4112 case L2CAP_SUPER_REJECT:
4113 l2cap_data_channel_rejframe(sk, rx_control);
4114 break;
4115
4116 case L2CAP_SUPER_SELECT_REJECT:
4117 l2cap_data_channel_srejframe(sk, rx_control);
4118 break;
4119
4120 case L2CAP_SUPER_RCV_NOT_READY:
4121 l2cap_data_channel_rnrframe(sk, rx_control);
4122 break;
4123 }
4124
4125 kfree_skb(skb);
4126 return 0;
4127 }
4128
/* Dispatch an inbound frame on a connection-oriented channel, looked up
 * by destination CID, according to the channel mode (Basic, ERTM or
 * Streaming).  Consumes the skb; always returns 0.
 *
 * NOTE(review): the socket appears to be returned bh-locked by
 * l2cap_get_chan_by_scid() — that matches the bh_unlock_sock() at
 * 'done' — confirm against that helper's definition. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq, req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* An SDU-start I-frame carries a 2-byte SDU length field,
		 * and FCS adds a 2-byte trailer; neither counts as data. */
		if (__is_sar_start(control) && __is_iframe(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* req_seq must fall inside the modulo-64 window between the
		 * last acknowledged frame and our next unsent one. */
		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (__is_iframe(control)) {
			/* negative len: frame shorter than its headers */
			if (len < 0) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames carry no payload */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode never carries S-frames. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming mode: on a gap, simply
		 * resynchronize the expected sequence number. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4264
4265 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4266 {
4267 struct sock *sk;
4268
4269 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4270 if (!sk)
4271 goto drop;
4272
4273 BT_DBG("sk %p, len %d", sk, skb->len);
4274
4275 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4276 goto drop;
4277
4278 if (l2cap_pi(sk)->imtu < skb->len)
4279 goto drop;
4280
4281 if (!sock_queue_rcv_skb(sk, skb))
4282 goto done;
4283
4284 drop:
4285 kfree_skb(skb);
4286
4287 done:
4288 if (sk)
4289 bh_unlock_sock(sk);
4290 return 0;
4291 }
4292
4293 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4294 {
4295 struct l2cap_hdr *lh = (void *) skb->data;
4296 u16 cid, len;
4297 __le16 psm;
4298
4299 skb_pull(skb, L2CAP_HDR_SIZE);
4300 cid = __le16_to_cpu(lh->cid);
4301 len = __le16_to_cpu(lh->len);
4302
4303 if (len != skb->len) {
4304 kfree_skb(skb);
4305 return;
4306 }
4307
4308 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4309
4310 switch (cid) {
4311 case L2CAP_CID_SIGNALING:
4312 l2cap_sig_channel(conn, skb);
4313 break;
4314
4315 case L2CAP_CID_CONN_LESS:
4316 psm = get_unaligned_le16(skb->data);
4317 skb_pull(skb, 2);
4318 l2cap_conless_channel(conn, psm, skb);
4319 break;
4320
4321 default:
4322 l2cap_data_channel(conn, cid, skb);
4323 break;
4324 }
4325 }
4326
4327 /* ---- L2CAP interface with lower layer (HCI) ---- */
4328
4329 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4330 {
4331 int exact = 0, lm1 = 0, lm2 = 0;
4332 register struct sock *sk;
4333 struct hlist_node *node;
4334
4335 if (type != ACL_LINK)
4336 return 0;
4337
4338 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4339
4340 /* Find listening sockets and check their link_mode */
4341 read_lock(&l2cap_sk_list.lock);
4342 sk_for_each(sk, node, &l2cap_sk_list.head) {
4343 if (sk->sk_state != BT_LISTEN)
4344 continue;
4345
4346 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4347 lm1 |= HCI_LM_ACCEPT;
4348 if (l2cap_pi(sk)->role_switch)
4349 lm1 |= HCI_LM_MASTER;
4350 exact++;
4351 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4352 lm2 |= HCI_LM_ACCEPT;
4353 if (l2cap_pi(sk)->role_switch)
4354 lm2 |= HCI_LM_MASTER;
4355 }
4356 }
4357 read_unlock(&l2cap_sk_list.lock);
4358
4359 return exact ? lm1 : lm2;
4360 }
4361
4362 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4363 {
4364 struct l2cap_conn *conn;
4365
4366 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4367
4368 if (hcon->type != ACL_LINK)
4369 return 0;
4370
4371 if (!status) {
4372 conn = l2cap_conn_add(hcon, status);
4373 if (conn)
4374 l2cap_conn_ready(conn);
4375 } else
4376 l2cap_conn_del(hcon, bt_err(status));
4377
4378 return 0;
4379 }
4380
4381 static int l2cap_disconn_ind(struct hci_conn *hcon)
4382 {
4383 struct l2cap_conn *conn = hcon->l2cap_data;
4384
4385 BT_DBG("hcon %p", hcon);
4386
4387 if (hcon->type != ACL_LINK || !conn)
4388 return 0x13;
4389
4390 return conn->disc_reason;
4391 }
4392
4393 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4394 {
4395 BT_DBG("hcon %p reason %d", hcon, reason);
4396
4397 if (hcon->type != ACL_LINK)
4398 return 0;
4399
4400 l2cap_conn_del(hcon, bt_err(reason));
4401
4402 return 0;
4403 }
4404
4405 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4406 {
4407 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4408 return;
4409
4410 if (encrypt == 0x00) {
4411 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4412 l2cap_sock_clear_timer(sk);
4413 l2cap_sock_set_timer(sk, HZ * 5);
4414 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4415 __l2cap_sock_close(sk, ECONNREFUSED);
4416 } else {
4417 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4418 l2cap_sock_clear_timer(sk);
4419 }
4420 }
4421
/* HCI callback: a security (authentication/encryption) procedure on
 * the link finished with 'status'.  Walk every channel multiplexed on
 * the connection and advance its state machine accordingly.  Always
 * returns 0. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting for a connect response are dealt
		 * with when that response arrives. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only care about encryption changes. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * connection request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: short timer to tear the
				 * channel down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection held for security: answer the
			 * peer's request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4495
/* HCI callback: reassemble L2CAP frames from (possibly fragmented) ACL
 * packets.  An ACL_START fragment carries the L2CAP header with the
 * total frame length; continuation fragments are appended to
 * conn->rx_skb until conn->rx_len reaches zero.  Consumes the skb;
 * always returns 0. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is pending means we
		 * lost fragments: discard the partial frame. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4583
4584 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4585 {
4586 struct sock *sk;
4587 struct hlist_node *node;
4588
4589 read_lock_bh(&l2cap_sk_list.lock);
4590
4591 sk_for_each(sk, node, &l2cap_sk_list.head) {
4592 struct l2cap_pinfo *pi = l2cap_pi(sk);
4593
4594 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4595 batostr(&bt_sk(sk)->src),
4596 batostr(&bt_sk(sk)->dst),
4597 sk->sk_state, __le16_to_cpu(pi->psm),
4598 pi->scid, pi->dcid,
4599 pi->imtu, pi->omtu, pi->sec_level);
4600 }
4601
4602 read_unlock_bh(&l2cap_sk_list.lock);
4603
4604 return 0;
4605 }
4606
/* debugfs open: bind the single-shot seq_file to l2cap_debugfs_show. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4611
/* debugfs file operations backing the "l2cap" debug file. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;

/* Socket-layer operations for PF_BLUETOOTH / BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = l2cap_sock_release,
	.bind = l2cap_sock_bind,
	.connect = l2cap_sock_connect,
	.listen = l2cap_sock_listen,
	.accept = l2cap_sock_accept,
	.getname = l2cap_sock_getname,
	.sendmsg = l2cap_sock_sendmsg,
	.recvmsg = l2cap_sock_recvmsg,
	.poll = bt_sock_poll,
	.ioctl = bt_sock_ioctl,
	.mmap = sock_no_mmap,
	.socketpair = sock_no_socketpair,
	.shutdown = l2cap_sock_shutdown,
	.setsockopt = l2cap_sock_setsockopt,
	.getsockopt = l2cap_sock_getsockopt
};

/* Hook invoked for socket(AF_BLUETOOTH, ..., BTPROTO_L2CAP). */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Registration with the HCI core: L2CAP event and data callbacks. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4657
4658 static int __init l2cap_init(void)
4659 {
4660 int err;
4661
4662 err = proto_register(&l2cap_proto, 0);
4663 if (err < 0)
4664 return err;
4665
4666 _busy_wq = create_singlethread_workqueue("l2cap");
4667 if (!_busy_wq)
4668 goto error;
4669
4670 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4671 if (err < 0) {
4672 BT_ERR("L2CAP socket registration failed");
4673 goto error;
4674 }
4675
4676 err = hci_register_proto(&l2cap_hci_proto);
4677 if (err < 0) {
4678 BT_ERR("L2CAP protocol registration failed");
4679 bt_sock_unregister(BTPROTO_L2CAP);
4680 goto error;
4681 }
4682
4683 if (bt_debugfs) {
4684 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4685 bt_debugfs, NULL, &l2cap_debugfs_fops);
4686 if (!l2cap_debugfs)
4687 BT_ERR("Failed to create L2CAP debug file");
4688 }
4689
4690 BT_INFO("L2CAP ver %s", VERSION);
4691 BT_INFO("L2CAP socket layer initialized");
4692
4693 return 0;
4694
4695 error:
4696 proto_unregister(&l2cap_proto);
4697 return err;
4698 }
4699
/* Module teardown: mirror of l2cap_init().
 * NOTE(review): the busy workqueue is destroyed before the socket and
 * HCI hooks are unregistered, so incoming traffic could in principle
 * still queue work after destroy — confirm the required ordering. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4715
/* Exported no-op: referencing this symbol from another module makes
 * the module loader pull in l2cap automatically. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4723
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime switch for Enhanced Retransmission Mode support (0644: also
 * togglable via /sys/module/l2cap/parameters). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");