/*
 * net/bluetooth/l2cap.c
 * (snapshot from mirror_ubuntu-bionic-kernel; commit subject:
 *  "Bluetooth: Only check SAR bits if frame is an I-frame")
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
56 #define VERSION "2.14"
57
58 static int enable_ertm = 0;
59
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63 static const struct proto_ops l2cap_sock_ops;
64
65 static struct workqueue_struct *_busy_wq;
66
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 };
70
71 static void l2cap_busy_work(struct work_struct *work);
72
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
76
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
79
80 /* ---- L2CAP timers ---- */
/* Socket timer callback: tear the channel down with an error that
 * reflects the state it was stuck in.  Runs in timer (softirq)
 * context, hence bh_lock_sock(). */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Timing out while CONNECTED/CONFIG, or while connecting past the
	 * SDP security stage, is reported as a refusal; everything else
	 * is a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill must run on the unlocked socket; drop the timer's
	 * reference last. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
105
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
107 {
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
110 }
111
112 static void l2cap_sock_clear_timer(struct sock *sk)
113 {
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
116 }
117
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 {
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
124 break;
125 }
126 return s;
127 }
128
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 {
131 struct sock *s;
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
134 break;
135 }
136 return s;
137 }
138
/* Find channel with given SCID.
 * Returns locked socket.  The socket lock is taken while still holding
 * the list lock so the channel cannot be unlinked underneath us. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
151
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 {
154 struct sock *s;
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
157 break;
158 }
159 return s;
160 }
161
/* Find channel by signalling identifier.
 * Returns locked socket; the socket lock is taken under the list lock
 * so the channel cannot be unlinked in between. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
172
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
174 {
175 u16 cid = L2CAP_CID_DYN_START;
176
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
179 return cid;
180 }
181
182 return 0;
183 }
184
/* Push the socket onto the front of the connection's channel list.
 * Caller must hold the list write lock.  The list owns a reference
 * on every linked socket (dropped in l2cap_chan_unlink()). */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
196
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
198 {
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
200
201 write_lock_bh(&l->lock);
202 if (sk == l->head)
203 l->head = next;
204
205 if (next)
206 l2cap_pi(next)->prev_c = prev;
207 if (prev)
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
210
211 __sock_put(sk);
212 }
213
/* Attach a channel to a connection: assign CIDs according to socket
 * type, link it into the channel list and, for incoming channels,
 * queue it on the listening parent.  Caller must hold the channel
 * list write lock (see l2cap_chan_add()). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason 0x13 — presumably HCI "remote user
	 * terminated connection"; TODO confirm against the HCI spec. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
245
/* Delete channel.
 * Must be called on the locked socket.  Unlinks the channel from its
 * connection, marks the socket closed/zapped and wakes whoever is
 * waiting on it (accepting parent or the socket itself). */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Drop the ACL reference held on behalf of this channel */
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending (not yet accepted) channel: detach from the
		 * accept queue and notify the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
276
/* Service level security: derive the HCI authentication requirement
 * from the socket's security level (PSM 0x0001 — SDP — never pairs)
 * and ask the HCI layer to enforce it on the underlying ACL link.
 * Returns non-zero when the link already satisfies the requirement. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		/* SDP never needs more than its dedicated level */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
308
/* Allocate the next signalling command identifier for this connection.
 * Wraps within the kernel-reserved range; conn->lock serialises
 * concurrent allocators. */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
330
331 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
332 {
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
334
335 BT_DBG("code 0x%2.2x", code);
336
337 if (!skb)
338 return;
339
340 hci_send_acl(conn->hcon, skb, 0);
341 }
342
/* Build and transmit an ERTM S-frame carrying the given control field.
 * Consumes any pending Final/Poll bit state and appends an FCS when
 * CRC16 checking is negotiated. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	/* Basic header + 2-byte control field */
	int count, hlen = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* A queued F-bit is sent once and then cleared */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a queued P-bit */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
384
385 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
386 {
387 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
388 control |= L2CAP_SUPER_RCV_NOT_READY;
389 pi->conn_state |= L2CAP_CONN_RNR_SENT;
390 } else
391 control |= L2CAP_SUPER_RCV_READY;
392
393 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
394
395 l2cap_send_sframe(pi, control);
396 }
397
398 static inline int __l2cap_no_conn_pending(struct sock *sk)
399 {
400 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
401 }
402
/* Kick off channel establishment.  If the remote feature mask is
 * already known (or being fetched) send a Connect Request once the
 * info exchange is done; otherwise start the feature-mask query
 * first — the Connect Request is then sent from the info response
 * path (l2cap_conn_start()). */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Query still in flight: wait for it to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
436
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
438 {
439 struct l2cap_disconn_req req;
440
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
445 }
446
447 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * outgoing channels in BT_CONNECT get their Connect Request sent
 * (once security allows), incoming ones in BT_CONNECT2 get their
 * pending Connect Response.  Called after the info exchange finishes
 * or after a security-level change. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented sockets take part */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk) &&
					__l2cap_no_conn_pending(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept first:
					 * answer "pending" and wake the
					 * listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security not yet satisfied */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
510
/* ACL link came up: raw/connectionless sockets become CONNECTED
 * immediately, while connection-oriented ones in BT_CONNECT proceed
 * with channel establishment. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No L2CAP-level handshake needed */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
536
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Only sockets that asked for reliable delivery are flagged */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
554
/* Info request timer expired without a response: mark the feature
 * discovery as finished and let waiting channels proceed anyway. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
564
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Returns NULL on allocation failure; a non-zero HCI status
 * returns whatever already hangs off the link (normally NULL). */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason — presumably HCI "remote user
	 * terminated connection" (0x13); TODO confirm. */
	conn->disc_reason = 0x13;

	return conn;
}
597
/* Tear down the L2CAP connection object: free any partially
 * reassembled frame, delete every channel (propagating err to the
 * sockets), stop the info timer and release the structure. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* kfree_skb() tolerates a NULL rx_skb */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
624
625 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
626 {
627 struct l2cap_chan_list *l = &conn->chan_list;
628 write_lock_bh(&l->lock);
629 __l2cap_chan_add(conn, sk, parent);
630 write_unlock_bh(&l->lock);
631 }
632
633 /* ---- Socket interface ---- */
634 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
635 {
636 struct sock *sk;
637 struct hlist_node *node;
638 sk_for_each(sk, node, &l2cap_sk_list.head)
639 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
640 goto found;
641 sk = NULL;
642 found:
643 return sk;
644 }
645
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * a socket bound to BDADDR_ANY (wildcard) is returned, if any.
 * Caller must hold l2cap_sk_list.lock. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		/* state == 0 means "any state" */
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is only non-NULL when the loop broke early, i.e. on an
	 * exact match; otherwise fall back to the wildcard candidate. */
	return node ? sk : sk1;
}
670
/* Find socket with given address (psm, src).
 * Returns locked socket; the socket lock is taken under the list lock
 * so it cannot be unlinked in between. */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
683
/* Socket destructor: release any skbs still queued for rx or tx. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
691
/* Shut down a listening socket: close every queued-but-unaccepted
 * child channel, then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
705
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.  A socket still attached to a
 * struct socket, or not yet zapped, is left alone. */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
721
/* Close a socket according to its current state.  Must be called with
 * the socket locked; 'reason' becomes sk_err on channels torn down
 * immediately. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Orderly shutdown: ask the peer to disconnect
			 * and wait (bounded by sndtimeo) for the
			 * Disconnect Response. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Incoming connection still pending userspace
			 * acceptance: reject it outright. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
776
/* Must be called on unlocked socket.  Stops the timer, performs the
 * state-dependent close under the socket lock, then reaps the socket
 * if it became a zapped orphan. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
786
/* Initialise a freshly allocated L2CAP socket.  A child accepted from
 * 'parent' inherits the parent's configuration; otherwise defaults
 * are applied (ERTM for SOCK_STREAM when the module allows it). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (enable_ertm && sk->sk_type == SOCK_STREAM)
			pi->mode = L2CAP_MODE_ERTM;
		else
			pi->mode = L2CAP_MODE_BASIC;
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
829
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for the
 * L2CAP-specific per-socket data (struct l2cap_pinfo). */
static struct proto l2cap_proto = {
	.name = "L2CAP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct l2cap_pinfo)
};
835
/* Allocate and minimally initialise an L2CAP socket, linking it into
 * the global socket list.  Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* sndtimeo doubles as the L2CAP connection timeout */
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
860
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP: validate the
 * socket type (raw sockets need CAP_NET_RAW unless created in-kernel),
 * then allocate and initialise the sock. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
886
/* bind(2): record the source bdaddr and PSM.  Binding a privileged
 * PSM (< 0x1001) requires CAP_NET_BIND_SERVICE; (PSM, bdaddr) pairs
 * must be unique.  Binding to a fixed CID is not supported here. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs are zero-extended */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* PSM 0x0001/0x0003 — presumably SDP and RFCOMM — get
		 * the relaxed SDP security level by default. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
940
/* Establish the underlying ACL connection and attach this channel to
 * it.  Picks the HCI authentication requirement from socket type and
 * security level, routes via the appropriate local adapter, and kicks
 * off L2CAP channel setup if the link is already up.
 * Returns 0 on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets use dedicated-bonding auth requirements */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* PSM 0x0001 (SDP) never bonds */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram channels need no L2CAP handshake */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1031
/* connect(2): validate the destination address and channel mode,
 * start the connection if the socket is connectable, then block
 * (honouring O_NONBLOCK) until BT_CONNECTED or failure. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming only when enabled module-wide */
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1108
/* listen(2): only bound SEQPACKET/STREAM sockets may listen.  If no
 * PSM was bound, auto-allocate a free one from the dynamic range
 * (0x1001..0x10ff, odd values only — hence the step of 2). */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1167
/* accept(2): sleep (wake-one) until a fully established child channel
 * is available on the accept queue, honouring O_NONBLOCK, signals and
 * the listener being closed underneath us. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the socket lock while sleeping so the softirq
		 * path can queue new connections. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1223
1224 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1225 {
1226 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1227 struct sock *sk = sock->sk;
1228
1229 BT_DBG("sock %p, sk %p", sock, sk);
1230
1231 addr->sa_family = AF_BLUETOOTH;
1232 *len = sizeof(struct sockaddr_l2);
1233
1234 if (peer) {
1235 la->l2_psm = l2cap_pi(sk)->psm;
1236 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1237 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1238 } else {
1239 la->l2_psm = l2cap_pi(sk)->sport;
1240 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1241 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1242 }
1243
1244 return 0;
1245 }
1246
/* Sleep until every transmitted ERTM I-frame has been acknowledged by
 * the peer (unacked_frames drops to zero) or the connection goes away.
 * Called with the socket lock held; the lock is dropped while sleeping.
 * Returns 0 on success, a socket error, or -EINTR/-ERESTARTSYS if
 * interrupted by a signal.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Poll in HZ/5 slices rather than sleeping forever. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		/* Abort if an asynchronous error was posted while asleep. */
		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1277
/* ERTM monitor timer callback (runs in timer/BH context, hence
 * bh_lock_sock).  Each expiry re-polls the peer with an S-frame; once
 * retry_count reaches the peer's max_tx the channel is torn down.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		/* Peer stopped responding to polls: give up on the channel. */
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	/* Poll the peer again (RR or RNR with the P bit set). */
	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1295
/* ERTM retransmission timer callback (timer/BH context).  An I-frame
 * went unacknowledged: start the monitor/poll sequence (retry_count = 1,
 * monitor timer armed, WAIT_F set so sending pauses until the peer's
 * F-bit response arrives) and poll the peer.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Suspend I-frame transmission until the poll is answered. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1309
/* Free acknowledged I-frames from the head of the TX queue.  Frames are
 * dropped up to (but not including) the one whose tx_seq equals
 * expected_ack_seq; when nothing remains unacked the retransmission
 * timer is stopped.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* First frame still awaiting an ack: stop here. */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1328
1329 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1330 {
1331 struct l2cap_pinfo *pi = l2cap_pi(sk);
1332
1333 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1334
1335 hci_send_acl(pi->conn->hcon, skb, 0);
1336 }
1337
/* Transmit every queued PDU in Streaming mode: stamp the next tx_seq
 * into the control field, recompute the trailing FCS if enabled, send a
 * clone, and free the original (streaming mode never retransmits).
 * Always returns 0.
 */
static int l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = sk->sk_send_head)) {
		/* NOTE(review): skb_clone() can return NULL under memory
		 * pressure and the result is dereferenced unchecked —
		 * worth confirming against later upstream fixes. */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		/* Control field sits right after the basic L2CAP header. */
		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own 2 bytes. */
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(sk, tx_skb);

		/* tx_seq is a modulo-64 sequence counter. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);
	}
	return 0;
}
1370
/* Retransmit the single I-frame whose sequence number is @tx_seq
 * (e.g. in response to a SREJ).  The frame is located in the TX queue,
 * its control field is refreshed with the current ReqSeq, the FCS is
 * recomputed, and a clone is sent.  If the frame has already been
 * transmitted remote_max_tx times, the channel is disconnected instead.
 */
static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* Walk the queue looking for the requested sequence number;
	 * silently give up if it is no longer queued. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		/* Retry budget exhausted: tear the channel down. */
		l2cap_send_disconn_req(pi->conn, sk);
		return;
	}

	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	/* Piggy-back the current receive state (ReqSeq) on the resend. */
	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1410
1411 static int l2cap_ertm_send(struct sock *sk)
1412 {
1413 struct sk_buff *skb, *tx_skb;
1414 struct l2cap_pinfo *pi = l2cap_pi(sk);
1415 u16 control, fcs;
1416 int nsent = 0;
1417
1418 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1419 return 0;
1420
1421 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1422 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1423
1424 if (pi->remote_max_tx &&
1425 bt_cb(skb)->retries == pi->remote_max_tx) {
1426 l2cap_send_disconn_req(pi->conn, sk);
1427 break;
1428 }
1429
1430 tx_skb = skb_clone(skb, GFP_ATOMIC);
1431
1432 bt_cb(skb)->retries++;
1433
1434 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1435 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1436 control |= L2CAP_CTRL_FINAL;
1437 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1438 }
1439 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1440 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1441 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1442
1443
1444 if (pi->fcs == L2CAP_FCS_CRC16) {
1445 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1446 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1447 }
1448
1449 l2cap_do_send(sk, tx_skb);
1450
1451 __mod_retrans_timer();
1452
1453 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1454 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1455
1456 pi->unacked_frames++;
1457 pi->frames_sent++;
1458
1459 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1460 sk->sk_send_head = NULL;
1461 else
1462 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1463
1464 nsent++;
1465 }
1466
1467 return nsent;
1468 }
1469
/* Rewind the send state to the last acknowledged frame and retransmit
 * everything from there via l2cap_ertm_send().  send_lock serializes
 * against the other paths that manipulate sk_send_head/next_tx_seq.
 * Returns what l2cap_ertm_send() returns.
 */
static int l2cap_retransmit_frames(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int ret;

	spin_lock_bh(&pi->send_lock);

	/* Restart sending from the head of the (unacked) TX queue. */
	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = TX_QUEUE(sk)->next;

	pi->next_tx_seq = pi->expected_ack_seq;
	ret = l2cap_ertm_send(sk);

	spin_unlock_bh(&pi->send_lock);

	return ret;
}
1487
/* Acknowledge received I-frames.  If we are locally busy, send RNR.
 * Otherwise try to piggy-back the ack on pending outgoing I-frames;
 * only if nothing was sent is an explicit RR S-frame emitted.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;
	int nframes;

	/* ReqSeq acknowledges everything up to buffer_seq. */
	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	spin_lock_bh(&pi->send_lock);
	nframes = l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* I-frames went out carrying the ack; no S-frame needed. */
	if (nframes > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1513
1514 static void l2cap_send_srejtail(struct sock *sk)
1515 {
1516 struct srej_list *tail;
1517 u16 control;
1518
1519 control = L2CAP_SUPER_SELECT_REJECT;
1520 control |= L2CAP_CTRL_FINAL;
1521
1522 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1523 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1524
1525 l2cap_send_sframe(l2cap_pi(sk), control);
1526 }
1527
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes into the skb's linear area (already sized by the caller), the
 * remainder into a chain of frag_list skbs capped at the ACL MTU.
 * Returns total bytes copied or a negative errno; on failure the
 * caller frees @skb, which also releases any fragments attached so far.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			/* NOTE(review): err from bt_skb_send_alloc() is
			 * discarded and -EFAULT returned instead —
			 * presumably should propagate err; confirm. */
			return -EFAULT;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1559
/* Build a connectionless (SOCK_DGRAM) L2CAP PDU: basic header plus a
 * 2-byte PSM, followed by the user payload gathered from @msg.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	/* Header is basic L2CAP header + 2 bytes of PSM. */
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* Linear part is capped at the ACL MTU; the rest goes into
	 * frag_list inside l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1588
1589 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1590 {
1591 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen = L2CAP_HDR_SIZE;
1594 struct l2cap_hdr *lh;
1595
1596 BT_DBG("sk %p len %d", sk, (int)len);
1597
1598 count = min_t(unsigned int, (conn->mtu - hlen), len);
1599 skb = bt_skb_send_alloc(sk, count + hlen,
1600 msg->msg_flags & MSG_DONTWAIT, &err);
1601 if (!skb)
1602 return ERR_PTR(-ENOMEM);
1603
1604 /* Create L2CAP header */
1605 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1606 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1607 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1608
1609 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1610 if (unlikely(err < 0)) {
1611 kfree_skb(skb);
1612 return ERR_PTR(err);
1613 }
1614 return skb;
1615 }
1616
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, 2-byte control
 * field (@control), optional 2-byte SDU length (@sdulen, present only
 * on the first segment of a segmented SDU), payload, and a 2-byte FCS
 * placeholder when CRC16 is enabled (filled in at transmit time).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	/* Base header: L2CAP header + 2-byte control field. */
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start segments carry an extra 2-byte SDU length field. */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing FCS. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real value is computed when the frame
	 * (or a retransmission) is actually sent. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1661
/* Segment an SDU larger than the peer's MPS into a chain of I-frame
 * PDUs (SAR start / continuation / end), build them all on a private
 * queue, then splice the queue onto the socket's TX queue in one go.
 * Returns total payload bytes queued or a negative errno (in which
 * case nothing is queued).
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* Start segment carries the total SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop every segment built so far. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	/* send_lock guards sk_send_head against the transmit paths. */
	spin_lock_bh(&pi->send_lock);
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;
	spin_unlock_bh(&pi->send_lock);

	return size;
}
1709
/* sendmsg() entry point.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM builds a connectionless PDU; Basic mode builds a single
 * PDU bounded by the outgoing MTU; ERTM/Streaming queue one I-frame or
 * a SAR-segmented chain and then kick the mode's transmit routine.
 * Returns bytes accepted or a negative errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EINVAL;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		/* Basic mode sends immediately; no queueing or acks. */
		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			/* sk_send_head is shared with the BH transmit
			 * paths in ERTM, so take send_lock there. */
			if (pi->mode == L2CAP_MODE_ERTM)
				spin_lock_bh(&pi->send_lock);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

			if (pi->mode == L2CAP_MODE_ERTM)
				spin_unlock_bh(&pi->send_lock);
		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		/* Push whatever was queued out onto the link. */
		if (pi->mode == L2CAP_MODE_STREAMING) {
			err = l2cap_streaming_send(sk);
		} else {
			spin_lock_bh(&pi->send_lock);
			err = l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EINVAL;
	}

done:
	release_sock(sk);
	return err;
}
1813
/* recvmsg() entry point.  With deferred setup, the first read on a
 * BT_CONNECT2 socket completes the connection instead of returning
 * data: the pending connect response is sent and configuration starts.
 * Otherwise receiving is delegated to the generic bt_sock_recvmsg().
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		/* In the response, scid/dcid are from the peer's point
		 * of view: its source is our destination. */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1840
/* Handle the legacy SOL_L2CAP socket option level: L2CAP_OPTIONS
 * (channel parameters) and L2CAP_LM (link-mode / security flags).
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Pre-fill with current values so a short write from
		 * userspace leaves the remaining fields unchanged. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* ERTM/Streaming are accepted only when enable_ertm is
		 * set; anything else is rejected. */
		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): on the -EINVAL path above, the invalid
		 * mode was already stored and the options below are
		 * still applied — looks like this should bail out
		 * first; confirm against later upstream versions. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map legacy link-mode flags to the security levels;
		 * the strongest requested flag wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1914
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default used when userspace passes a short struct. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
				sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1979
1980 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1981 {
1982 struct sock *sk = sock->sk;
1983 struct l2cap_options opts;
1984 struct l2cap_conninfo cinfo;
1985 int len, err = 0;
1986 u32 opt;
1987
1988 BT_DBG("sk %p", sk);
1989
1990 if (get_user(len, optlen))
1991 return -EFAULT;
1992
1993 lock_sock(sk);
1994
1995 switch (optname) {
1996 case L2CAP_OPTIONS:
1997 opts.imtu = l2cap_pi(sk)->imtu;
1998 opts.omtu = l2cap_pi(sk)->omtu;
1999 opts.flush_to = l2cap_pi(sk)->flush_to;
2000 opts.mode = l2cap_pi(sk)->mode;
2001 opts.fcs = l2cap_pi(sk)->fcs;
2002 opts.max_tx = l2cap_pi(sk)->max_tx;
2003 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2004
2005 len = min_t(unsigned int, len, sizeof(opts));
2006 if (copy_to_user(optval, (char *) &opts, len))
2007 err = -EFAULT;
2008
2009 break;
2010
2011 case L2CAP_LM:
2012 switch (l2cap_pi(sk)->sec_level) {
2013 case BT_SECURITY_LOW:
2014 opt = L2CAP_LM_AUTH;
2015 break;
2016 case BT_SECURITY_MEDIUM:
2017 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2018 break;
2019 case BT_SECURITY_HIGH:
2020 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2021 L2CAP_LM_SECURE;
2022 break;
2023 default:
2024 opt = 0;
2025 break;
2026 }
2027
2028 if (l2cap_pi(sk)->role_switch)
2029 opt |= L2CAP_LM_MASTER;
2030
2031 if (l2cap_pi(sk)->force_reliable)
2032 opt |= L2CAP_LM_RELIABLE;
2033
2034 if (put_user(opt, (u32 __user *) optval))
2035 err = -EFAULT;
2036 break;
2037
2038 case L2CAP_CONNINFO:
2039 if (sk->sk_state != BT_CONNECTED &&
2040 !(sk->sk_state == BT_CONNECT2 &&
2041 bt_sk(sk)->defer_setup)) {
2042 err = -ENOTCONN;
2043 break;
2044 }
2045
2046 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2047 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2048
2049 len = min_t(unsigned int, len, sizeof(cinfo));
2050 if (copy_to_user(optval, (char *) &cinfo, len))
2051 err = -EFAULT;
2052
2053 break;
2054
2055 default:
2056 err = -ENOPROTOOPT;
2057 break;
2058 }
2059
2060 release_sock(sk);
2061 return err;
2062 }
2063
/* getsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH serves BT_SECURITY and BT_DEFER_SETUP here.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2118
/* shutdown() entry point.  For ERTM channels, first waits for all
 * outstanding I-frames to be acknowledged, then closes the channel;
 * with SO_LINGER set it also waits for the socket to reach BT_CLOSED.
 * @how is ignored: any shutdown closes both directions.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	/* Idempotent: do the teardown only once. */
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
2145
/* release() entry point (final close of the socket fd): shut the
 * channel down, detach the sock from the socket, and let
 * l2cap_sock_kill() free it once it is truly dead.
 */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
2162
2163 static void l2cap_chan_ready(struct sock *sk)
2164 {
2165 struct sock *parent = bt_sk(sk)->parent;
2166
2167 BT_DBG("sk %p, parent %p", sk, parent);
2168
2169 l2cap_pi(sk)->conf_state = 0;
2170 l2cap_sock_clear_timer(sk);
2171
2172 if (!parent) {
2173 /* Outgoing channel.
2174 * Wake up socket sleeping on connect.
2175 */
2176 sk->sk_state = BT_CONNECTED;
2177 sk->sk_state_change(sk);
2178 } else {
2179 /* Incoming channel.
2180 * Wake up socket sleeping on accept.
2181 */
2182 parent->sk_data_ready(parent, 0);
2183 }
2184 }
2185
/* Copy frame to all raw sockets on that connection.  Each SOCK_RAW
 * socket (except the one the frame originated from) receives its own
 * clone; clone or queueing failures are silently skipped, since raw
 * delivery is best-effort.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue owns nskb on success; free it on overflow. */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2212
2213 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU on the signalling channel: L2CAP header +
 * command header + @dlen bytes of @data.  If the whole command exceeds
 * the ACL MTU, the remainder is chained as frag_list skbs.  Returns the
 * skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head and every fragment attached so far. */
	kfree_skb(skb);
	return NULL;
}
2272
/* Decode one configuration option at *@ptr: store its type and length,
 * and its value in *@val — decoded little-endian for 1/2/4-byte
 * options, or as a pointer to the raw bytes for other lengths.
 * Advances *@ptr past the option and returns the number of bytes
 * consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Variable-length option: hand back a pointer instead. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2305
/* Append one configuration option at *@ptr and advance the pointer.
 * 1/2/4-byte values are stored little-endian; for other lengths @val
 * is treated as a pointer to @len raw bytes (mirrors
 * l2cap_get_conf_opt()).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		/* Variable-length option: val is a pointer to the data. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2335
/* Ack timer callback (timer/BH context): flush a pending acknowledgment
 * for frames received since the last ack was sent.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2344
/* Initialize per-channel ERTM state when the channel enters ERTM mode:
 * reset sequence counters, arm the three ERTM timers, and set up the
 * SREJ/busy queues, the send lock and the local-busy work item.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	/* Timers fire in BH context with the socket as callback arg. */
	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));
	spin_lock_init(&l2cap_pi(sk)->send_lock);

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
}
2366
2367 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2368 {
2369 u32 local_feat_mask = l2cap_feat_mask;
2370 if (enable_ertm)
2371 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2372
2373 switch (mode) {
2374 case L2CAP_MODE_ERTM:
2375 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2376 case L2CAP_MODE_STREAMING:
2377 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2378 default:
2379 return 0x00;
2380 }
2381 }
2382
2383 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2384 {
2385 switch (mode) {
2386 case L2CAP_MODE_STREAMING:
2387 case L2CAP_MODE_ERTM:
2388 if (l2cap_mode_supported(mode, remote_feat_mask))
2389 return mode;
2390 /* fall through */
2391 default:
2392 return L2CAP_MODE_BASIC;
2393 }
2394 }
2395
/* Build an outgoing Configuration Request for this channel into @data.
 * On the first request, either locks in an application-requested
 * ERTM/Streaming mode or negotiates one down from the remote feature
 * mask; then emits the mode-specific options (MTU, RFC, FCS).
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only on the very first exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* The application insisted on this mode: mark it as
		 * mandated by the local device and bail out if it is
		 * not mutually supported. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise the MTU when it differs from default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		/* Timeouts are filled in by the responder. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep the PDU within the ACL MTU minus worst-case
		 * header + FCS overhead (10 bytes). */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer FCS-off when we or the peer want it disabled. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		/* Window/retry/timeout fields are unused in streaming. */
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2483
2484 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2485 {
2486 struct l2cap_pinfo *pi = l2cap_pi(sk);
2487 struct l2cap_conf_rsp *rsp = data;
2488 void *ptr = rsp->data;
2489 void *req = pi->conf_req;
2490 int len = pi->conf_len;
2491 int type, hint, olen;
2492 unsigned long val;
2493 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2494 u16 mtu = L2CAP_DEFAULT_MTU;
2495 u16 result = L2CAP_CONF_SUCCESS;
2496
2497 BT_DBG("sk %p", sk);
2498
2499 while (len >= L2CAP_CONF_OPT_SIZE) {
2500 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2501
2502 hint = type & L2CAP_CONF_HINT;
2503 type &= L2CAP_CONF_MASK;
2504
2505 switch (type) {
2506 case L2CAP_CONF_MTU:
2507 mtu = val;
2508 break;
2509
2510 case L2CAP_CONF_FLUSH_TO:
2511 pi->flush_to = val;
2512 break;
2513
2514 case L2CAP_CONF_QOS:
2515 break;
2516
2517 case L2CAP_CONF_RFC:
2518 if (olen == sizeof(rfc))
2519 memcpy(&rfc, (void *) val, olen);
2520 break;
2521
2522 case L2CAP_CONF_FCS:
2523 if (val == L2CAP_FCS_NONE)
2524 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2525
2526 break;
2527
2528 default:
2529 if (hint)
2530 break;
2531
2532 result = L2CAP_CONF_UNKNOWN;
2533 *((u8 *) ptr++) = type;
2534 break;
2535 }
2536 }
2537
2538 if (pi->num_conf_rsp || pi->num_conf_req)
2539 goto done;
2540
2541 switch (pi->mode) {
2542 case L2CAP_MODE_STREAMING:
2543 case L2CAP_MODE_ERTM:
2544 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2545 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2546 return -ECONNREFUSED;
2547 break;
2548 default:
2549 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2550 break;
2551 }
2552
2553 done:
2554 if (pi->mode != rfc.mode) {
2555 result = L2CAP_CONF_UNACCEPT;
2556 rfc.mode = pi->mode;
2557
2558 if (pi->num_conf_rsp == 1)
2559 return -ECONNREFUSED;
2560
2561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2562 sizeof(rfc), (unsigned long) &rfc);
2563 }
2564
2565
2566 if (result == L2CAP_CONF_SUCCESS) {
2567 /* Configure output options and let the other side know
2568 * which ones we don't like. */
2569
2570 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2571 result = L2CAP_CONF_UNACCEPT;
2572 else {
2573 pi->omtu = mtu;
2574 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2575 }
2576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2577
2578 switch (rfc.mode) {
2579 case L2CAP_MODE_BASIC:
2580 pi->fcs = L2CAP_FCS_NONE;
2581 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2582 break;
2583
2584 case L2CAP_MODE_ERTM:
2585 pi->remote_tx_win = rfc.txwin_size;
2586 pi->remote_max_tx = rfc.max_transmit;
2587 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2588 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2589
2590 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2591
2592 rfc.retrans_timeout =
2593 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2594 rfc.monitor_timeout =
2595 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2596
2597 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2598
2599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2600 sizeof(rfc), (unsigned long) &rfc);
2601
2602 break;
2603
2604 case L2CAP_MODE_STREAMING:
2605 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2606 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2607
2608 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2609
2610 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2611
2612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2613 sizeof(rfc), (unsigned long) &rfc);
2614
2615 break;
2616
2617 default:
2618 result = L2CAP_CONF_UNACCEPT;
2619
2620 memset(&rfc, 0, sizeof(rfc));
2621 rfc.mode = pi->mode;
2622 }
2623
2624 if (result == L2CAP_CONF_SUCCESS)
2625 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2626 }
2627 rsp->scid = cpu_to_le16(pi->dcid);
2628 rsp->result = cpu_to_le16(result);
2629 rsp->flags = cpu_to_le16(0x0000);
2630
2631 return ptr - data;
2632 }
2633
2634 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2635 {
2636 struct l2cap_pinfo *pi = l2cap_pi(sk);
2637 struct l2cap_conf_req *req = data;
2638 void *ptr = req->data;
2639 int type, olen;
2640 unsigned long val;
2641 struct l2cap_conf_rfc rfc;
2642
2643 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2644
2645 while (len >= L2CAP_CONF_OPT_SIZE) {
2646 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2647
2648 switch (type) {
2649 case L2CAP_CONF_MTU:
2650 if (val < L2CAP_DEFAULT_MIN_MTU) {
2651 *result = L2CAP_CONF_UNACCEPT;
2652 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2653 } else
2654 pi->omtu = val;
2655 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2656 break;
2657
2658 case L2CAP_CONF_FLUSH_TO:
2659 pi->flush_to = val;
2660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2661 2, pi->flush_to);
2662 break;
2663
2664 case L2CAP_CONF_RFC:
2665 if (olen == sizeof(rfc))
2666 memcpy(&rfc, (void *)val, olen);
2667
2668 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2669 rfc.mode != pi->mode)
2670 return -ECONNREFUSED;
2671
2672 pi->mode = rfc.mode;
2673 pi->fcs = 0;
2674
2675 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2676 sizeof(rfc), (unsigned long) &rfc);
2677 break;
2678 }
2679 }
2680
2681 if (*result == L2CAP_CONF_SUCCESS) {
2682 switch (rfc.mode) {
2683 case L2CAP_MODE_ERTM:
2684 pi->remote_tx_win = rfc.txwin_size;
2685 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2686 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2687 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2688 break;
2689 case L2CAP_MODE_STREAMING:
2690 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2691 }
2692 }
2693
2694 req->dcid = cpu_to_le16(pi->dcid);
2695 req->flags = cpu_to_le16(0x0000);
2696
2697 return ptr - data;
2698 }
2699
2700 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2701 {
2702 struct l2cap_conf_rsp *rsp = data;
2703 void *ptr = rsp->data;
2704
2705 BT_DBG("sk %p", sk);
2706
2707 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2708 rsp->result = cpu_to_le16(result);
2709 rsp->flags = cpu_to_le16(flags);
2710
2711 return ptr - data;
2712 }
2713
2714 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2715 {
2716 struct l2cap_pinfo *pi = l2cap_pi(sk);
2717 int type, olen;
2718 unsigned long val;
2719 struct l2cap_conf_rfc rfc;
2720
2721 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2722
2723 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2724 return;
2725
2726 while (len >= L2CAP_CONF_OPT_SIZE) {
2727 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2728
2729 switch (type) {
2730 case L2CAP_CONF_RFC:
2731 if (olen == sizeof(rfc))
2732 memcpy(&rfc, (void *)val, olen);
2733 goto done;
2734 }
2735 }
2736
2737 done:
2738 switch (rfc.mode) {
2739 case L2CAP_MODE_ERTM:
2740 pi->remote_tx_win = rfc.txwin_size;
2741 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2742 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2743 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2744 break;
2745 case L2CAP_MODE_STREAMING:
2746 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2747 }
2748 }
2749
2750 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2751 {
2752 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2753
2754 if (rej->reason != 0x0000)
2755 return 0;
2756
2757 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2758 cmd->ident == conn->info_ident) {
2759 del_timer(&conn->info_timer);
2760
2761 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2762 conn->info_ident = 0;
2763
2764 l2cap_conn_start(conn);
2765 }
2766
2767 return 0;
2768 }
2769
/*
 * Handle an incoming L2CAP Connection Request.
 *
 * Looks up a listening socket for the requested PSM, allocates a child
 * socket, links it into the connection's channel list and replies with a
 * Connection Response.  If the feature-mask exchange has not completed
 * yet, the response is "pending" and an Information Request is sent
 * first.
 *
 * NOTE(review): "response:" releases the parent with bh_unlock_sock() --
 * presumably l2cap_get_sock_by_psm() returns the socket locked; confirm
 * against its definition.
 */
2770 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2771 {
2772 struct l2cap_chan_list *list = &conn->chan_list;
2773 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2774 struct l2cap_conn_rsp rsp;
2775 struct sock *sk, *parent;
2776 int result, status = L2CAP_CS_NO_INFO;
2777
2778 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2779 __le16 psm = req->psm;
2780
2781 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2782
2783 /* Check if we have socket listening on psm */
2784 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2785 if (!parent) {
2786 result = L2CAP_CR_BAD_PSM;
2787 goto sendresp;
2788 }
2789
2790 /* Check if the ACL is secure enough (if not SDP) */
2791 if (psm != cpu_to_le16(0x0001) &&
2792 !hci_conn_check_link_mode(conn->hcon)) {
2793 conn->disc_reason = 0x05;
2794 result = L2CAP_CR_SEC_BLOCK;
2795 goto response;
2796 }
2797
/* From here on, any failure is reported as "no resources" */
2798 result = L2CAP_CR_NO_MEM;
2799
2800 /* Check for backlog size */
2801 if (sk_acceptq_is_full(parent)) {
2802 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2803 goto response;
2804 }
2805
2806 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2807 if (!sk)
2808 goto response;
2809
2810 write_lock_bh(&list->lock);
2811
2812 /* Check if we already have channel with that dcid */
2813 if (__l2cap_get_chan_by_dcid(list, scid)) {
2814 write_unlock_bh(&list->lock);
2815 sock_set_flag(sk, SOCK_ZAPPED);
2816 l2cap_sock_kill(sk);
2817 goto response;
2818 }
2819
/* Keep the ACL alive for the lifetime of this channel */
2820 hci_conn_hold(conn->hcon);
2821
2822 l2cap_sock_init(sk, parent);
2823 bacpy(&bt_sk(sk)->src, conn->src);
2824 bacpy(&bt_sk(sk)->dst, conn->dst);
2825 l2cap_pi(sk)->psm = psm;
2826 l2cap_pi(sk)->dcid = scid;
2827
2828 __l2cap_chan_add(conn, sk, parent);
2829 dcid = l2cap_pi(sk)->scid;
2830
2831 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2832
2833 l2cap_pi(sk)->ident = cmd->ident;
2834
/* Decide the response: success only when the feature mask exchange is
 * done, security has been checked, and the app did not defer setup. */
2835 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2836 if (l2cap_check_security(sk)) {
2837 if (bt_sk(sk)->defer_setup) {
2838 sk->sk_state = BT_CONNECT2;
2839 result = L2CAP_CR_PEND;
2840 status = L2CAP_CS_AUTHOR_PEND;
2841 parent->sk_data_ready(parent, 0);
2842 } else {
2843 sk->sk_state = BT_CONFIG;
2844 result = L2CAP_CR_SUCCESS;
2845 status = L2CAP_CS_NO_INFO;
2846 }
2847 } else {
2848 sk->sk_state = BT_CONNECT2;
2849 result = L2CAP_CR_PEND;
2850 status = L2CAP_CS_AUTHEN_PEND;
2851 }
2852 } else {
2853 sk->sk_state = BT_CONNECT2;
2854 result = L2CAP_CR_PEND;
2855 status = L2CAP_CS_NO_INFO;
2856 }
2857
2858 write_unlock_bh(&list->lock);
2859
2860 response:
2861 bh_unlock_sock(parent);
2862
2863 sendresp:
2864 rsp.scid = cpu_to_le16(scid);
2865 rsp.dcid = cpu_to_le16(dcid);
2866 rsp.result = cpu_to_le16(result);
2867 rsp.status = cpu_to_le16(status);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2869
/* Pending with "no further information": the feature mask exchange has
 * not started yet, so kick it off now. */
2870 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2871 struct l2cap_info_req info;
2872 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2873
2874 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2875 conn->info_ident = l2cap_get_ident(conn);
2876
2877 mod_timer(&conn->info_timer, jiffies +
2878 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2879
2880 l2cap_send_cmd(conn, conn->info_ident,
2881 L2CAP_INFO_REQ, sizeof(info), &info);
2882 }
2883
2884 return 0;
2885 }
2886
/*
 * Handle a Connection Response to our earlier Connection Request.
 * On success, move to BT_CONFIG and fire off the first Configure
 * Request; on "pending", just note it; anything else tears the channel
 * down with ECONNREFUSED.
 *
 * NOTE(review): l2cap_get_chan_by_scid()/_by_ident() presumably return
 * the socket locked (released via bh_unlock_sock below) -- confirm
 * against their definitions.
 */
2887 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2888 {
2889 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2890 u16 scid, dcid, result, status;
2891 struct sock *sk;
2892 u8 req[128];
2893
2894 scid = __le16_to_cpu(rsp->scid);
2895 dcid = __le16_to_cpu(rsp->dcid);
2896 result = __le16_to_cpu(rsp->result);
2897 status = __le16_to_cpu(rsp->status);
2898
2899 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2900
/* A zero scid can only be matched by the signalling identifier */
2901 if (scid) {
2902 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2903 if (!sk)
2904 return 0;
2905 } else {
2906 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2907 if (!sk)
2908 return 0;
2909 }
2910
2911 switch (result) {
2912 case L2CAP_CR_SUCCESS:
2913 sk->sk_state = BT_CONFIG;
2914 l2cap_pi(sk)->ident = 0;
2915 l2cap_pi(sk)->dcid = dcid;
2916 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2917 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2918
2919 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2920 l2cap_build_conf_req(sk, req), req);
2921 l2cap_pi(sk)->num_conf_req++;
2922 break;
2923
2924 case L2CAP_CR_PEND:
2925 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2926 break;
2927
2928 default:
2929 l2cap_chan_del(sk, ECONNREFUSED);
2930 break;
2931 }
2932
2933 bh_unlock_sock(sk);
2934 return 0;
2935 }
2936
/*
 * Handle a Configure Request.  Option payloads may span several
 * requests (continuation flag 0x0001); they are accumulated in
 * pi->conf_req until the final fragment arrives, then parsed as a
 * whole.  When both directions are configured, the channel goes to
 * BT_CONNECTED.
 *
 * NOTE(review): l2cap_get_chan_by_scid() presumably returns the socket
 * locked (released at "unlock:") -- confirm against its definition.
 */
2937 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2938 {
2939 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2940 u16 dcid, flags;
2941 u8 rsp[64];
2942 struct sock *sk;
2943 int len;
2944
2945 dcid = __le16_to_cpu(req->dcid);
2946 flags = __le16_to_cpu(req->flags);
2947
2948 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2949
2950 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2951 if (!sk)
2952 return -ENOENT;
2953
2954 if (sk->sk_state == BT_DISCONN)
2955 goto unlock;
2956
2957 /* Reject if config buffer is too small. */
2958 len = cmd_len - sizeof(*req);
2959 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2960 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2961 l2cap_build_conf_rsp(sk, rsp,
2962 L2CAP_CONF_REJECT, flags), rsp);
2963 goto unlock;
2964 }
2965
2966 /* Store config. */
2967 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2968 l2cap_pi(sk)->conf_len += len;
2969
2970 if (flags & 0x0001) {
2971 /* Incomplete config. Send empty response. */
2972 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2973 l2cap_build_conf_rsp(sk, rsp,
2974 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2975 goto unlock;
2976 }
2977
2978 /* Complete config. */
2979 len = l2cap_parse_conf_req(sk, rsp);
2980 if (len < 0) {
/* Unresolvable mode conflict: tear the channel down */
2981 l2cap_send_disconn_req(conn, sk);
2982 goto unlock;
2983 }
2984
2985 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2986 l2cap_pi(sk)->num_conf_rsp++;
2987
2988 /* Reset config buffer. */
2989 l2cap_pi(sk)->conf_len = 0;
2990
2991 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2992 goto unlock;
2993
/* Both directions configured: enable FCS unless both sides asked for
 * none, then bring the channel up. */
2994 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2995 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2996 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2997 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2998
2999 sk->sk_state = BT_CONNECTED;
3000
3001 l2cap_pi(sk)->next_tx_seq = 0;
3002 l2cap_pi(sk)->expected_tx_seq = 0;
3003 __skb_queue_head_init(TX_QUEUE(sk));
3004 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3005 l2cap_ertm_init(sk);
3006
3007 l2cap_chan_ready(sk);
3008 goto unlock;
3009 }
3010
/* We have not sent our own Configure Request yet: do it now */
3011 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3012 u8 buf[64];
3013 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3014 l2cap_build_conf_req(sk, buf), buf);
3015 l2cap_pi(sk)->num_conf_req++;
3016 }
3017
3018 unlock:
3019 bh_unlock_sock(sk);
3020 return 0;
3021 }
3022
3023 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3024 {
3025 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3026 u16 scid, flags, result;
3027 struct sock *sk;
3028 int len = cmd->len - sizeof(*rsp);
3029
3030 scid = __le16_to_cpu(rsp->scid);
3031 flags = __le16_to_cpu(rsp->flags);
3032 result = __le16_to_cpu(rsp->result);
3033
3034 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3035 scid, flags, result);
3036
3037 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3038 if (!sk)
3039 return 0;
3040
3041 switch (result) {
3042 case L2CAP_CONF_SUCCESS:
3043 l2cap_conf_rfc_get(sk, rsp->data, len);
3044 break;
3045
3046 case L2CAP_CONF_UNACCEPT:
3047 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3048 char req[64];
3049
3050 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3051 l2cap_send_disconn_req(conn, sk);
3052 goto done;
3053 }
3054
3055 /* throw out any old stored conf requests */
3056 result = L2CAP_CONF_SUCCESS;
3057 len = l2cap_parse_conf_rsp(sk, rsp->data,
3058 len, req, &result);
3059 if (len < 0) {
3060 l2cap_send_disconn_req(conn, sk);
3061 goto done;
3062 }
3063
3064 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3065 L2CAP_CONF_REQ, len, req);
3066 l2cap_pi(sk)->num_conf_req++;
3067 if (result != L2CAP_CONF_SUCCESS)
3068 goto done;
3069 break;
3070 }
3071
3072 default:
3073 sk->sk_state = BT_DISCONN;
3074 sk->sk_err = ECONNRESET;
3075 l2cap_sock_set_timer(sk, HZ * 5);
3076 l2cap_send_disconn_req(conn, sk);
3077 goto done;
3078 }
3079
3080 if (flags & 0x01)
3081 goto done;
3082
3083 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3084
3085 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3086 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3087 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3088 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3089
3090 sk->sk_state = BT_CONNECTED;
3091 l2cap_pi(sk)->next_tx_seq = 0;
3092 l2cap_pi(sk)->expected_tx_seq = 0;
3093 __skb_queue_head_init(TX_QUEUE(sk));
3094 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3095 l2cap_ertm_init(sk);
3096
3097 l2cap_chan_ready(sk);
3098 }
3099
3100 done:
3101 bh_unlock_sock(sk);
3102 return 0;
3103 }
3104
/*
 * Handle a Disconnection Request: acknowledge it, flush all pending
 * transmit/ERTM state, remove the channel and kill the socket.
 */
3105 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3106 {
3107 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3108 struct l2cap_disconn_rsp rsp;
3109 u16 dcid, scid;
3110 struct sock *sk;
3111
3112 scid = __le16_to_cpu(req->scid);
3113 dcid = __le16_to_cpu(req->dcid);
3114
3115 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3116
/* The peer's dcid is our scid */
3117 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3118 if (!sk)
3119 return 0;
3120
3121 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3122 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3123 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3124
3125 sk->sk_shutdown = SHUTDOWN_MASK;
3126
3127 skb_queue_purge(TX_QUEUE(sk));
3128
/* ERTM keeps extra queues and timers that must be torn down too */
3129 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3130 skb_queue_purge(SREJ_QUEUE(sk));
3131 skb_queue_purge(BUSY_QUEUE(sk));
3132 del_timer(&l2cap_pi(sk)->retrans_timer);
3133 del_timer(&l2cap_pi(sk)->monitor_timer);
3134 del_timer(&l2cap_pi(sk)->ack_timer);
3135 }
3136
3137 l2cap_chan_del(sk, ECONNRESET);
3138 bh_unlock_sock(sk);
3139
3140 l2cap_sock_kill(sk);
3141 return 0;
3142 }
3143
/*
 * Handle a Disconnection Response to our own Disconnection Request:
 * flush all pending transmit/ERTM state, remove the channel (err 0 --
 * this was a locally requested, clean disconnect) and kill the socket.
 */
3144 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3145 {
3146 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3147 u16 dcid, scid;
3148 struct sock *sk;
3149
3150 scid = __le16_to_cpu(rsp->scid);
3151 dcid = __le16_to_cpu(rsp->dcid);
3152
3153 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3154
3155 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3156 if (!sk)
3157 return 0;
3158
3159 skb_queue_purge(TX_QUEUE(sk));
3160
/* ERTM keeps extra queues and timers that must be torn down too */
3161 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3162 skb_queue_purge(SREJ_QUEUE(sk));
3163 skb_queue_purge(BUSY_QUEUE(sk));
3164 del_timer(&l2cap_pi(sk)->retrans_timer);
3165 del_timer(&l2cap_pi(sk)->monitor_timer);
3166 del_timer(&l2cap_pi(sk)->ack_timer);
3167 }
3168
3169 l2cap_chan_del(sk, 0);
3170 bh_unlock_sock(sk);
3171
3172 l2cap_sock_kill(sk);
3173 return 0;
3174 }
3175
/*
 * Handle an Information Request: answer feature-mask and fixed-channel
 * queries, reject everything else with "not supported".
 */
3176 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3177 {
3178 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3179 u16 type;
3180
3181 type = __le16_to_cpu(req->type);
3182
3183 BT_DBG("type 0x%4.4x", type);
3184
3185 if (type == L2CAP_IT_FEAT_MASK) {
/* 4-byte rsp header + 4-byte feature mask */
3186 u8 buf[8];
3187 u32 feat_mask = l2cap_feat_mask;
3188 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3189 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3190 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming/FCS only when the module option enabled them */
3191 if (enable_ertm)
3192 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3193 | L2CAP_FEAT_FCS;
3194 put_unaligned_le32(feat_mask, rsp->data);
3195 l2cap_send_cmd(conn, cmd->ident,
3196 L2CAP_INFO_RSP, sizeof(buf), buf);
3197 } else if (type == L2CAP_IT_FIXED_CHAN) {
/* 4-byte rsp header + 8-byte fixed channel bitmap */
3198 u8 buf[12];
3199 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3200 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3201 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3202 memcpy(buf + 4, l2cap_fixed_chan, 8);
3203 l2cap_send_cmd(conn, cmd->ident,
3204 L2CAP_INFO_RSP, sizeof(buf), buf);
3205 } else {
3206 struct l2cap_info_rsp rsp;
3207 rsp.type = cpu_to_le16(type);
3208 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3209 l2cap_send_cmd(conn, cmd->ident,
3210 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3211 }
3212
3213 return 0;
3214 }
3215
/*
 * Handle an Information Response.  A feature-mask answer either chains
 * into a fixed-channel query (when the peer supports fixed channels) or
 * completes the exchange; either completion path marks the exchange
 * done and starts any channels that were waiting on it.
 */
3216 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3217 {
3218 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3219 u16 type, result;
3220
3221 type = __le16_to_cpu(rsp->type);
3222 result = __le16_to_cpu(rsp->result);
3223
3224 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3225
/* The response arrived, so the info-request timeout no longer applies */
3226 del_timer(&conn->info_timer);
3227
3228 if (type == L2CAP_IT_FEAT_MASK) {
3229 conn->feat_mask = get_unaligned_le32(rsp->data);
3230
3231 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Follow up with a fixed-channel query before declaring done */
3232 struct l2cap_info_req req;
3233 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3234
3235 conn->info_ident = l2cap_get_ident(conn);
3236
3237 l2cap_send_cmd(conn, conn->info_ident,
3238 L2CAP_INFO_REQ, sizeof(req), &req);
3239 } else {
3240 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3241 conn->info_ident = 0;
3242
3243 l2cap_conn_start(conn);
3244 }
3245 } else if (type == L2CAP_IT_FIXED_CHAN) {
3246 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3247 conn->info_ident = 0;
3248
3249 l2cap_conn_start(conn);
3250 }
3251
3252 return 0;
3253 }
3254
/*
 * Process all signalling commands packed into one skb received on the
 * L2CAP signalling channel, dispatching each to its handler and
 * answering failures with a Command Reject.  Consumes @skb.
 */
3255 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3256 {
3257 u8 *data = skb->data;
3258 int len = skb->len;
3259 struct l2cap_cmd_hdr cmd;
3260 int err = 0;
3261
/* Give raw sockets a copy of the signalling traffic first */
3262 l2cap_raw_recv(conn, skb);
3263
3264 while (len >= L2CAP_CMD_HDR_SIZE) {
3265 u16 cmd_len;
3266 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3267 data += L2CAP_CMD_HDR_SIZE;
3268 len -= L2CAP_CMD_HDR_SIZE;
3269
3270 cmd_len = le16_to_cpu(cmd.len);
3271
3272 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3273
/* Stop on truncated payloads or a reserved (zero) identifier */
3274 if (cmd_len > len || !cmd.ident) {
3275 BT_DBG("corrupted command");
3276 break;
3277 }
3278
3279 switch (cmd.code) {
3280 case L2CAP_COMMAND_REJ:
3281 l2cap_command_rej(conn, &cmd, data);
3282 break;
3283
3284 case L2CAP_CONN_REQ:
3285 err = l2cap_connect_req(conn, &cmd, data);
3286 break;
3287
3288 case L2CAP_CONN_RSP:
3289 err = l2cap_connect_rsp(conn, &cmd, data);
3290 break;
3291
3292 case L2CAP_CONF_REQ:
3293 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3294 break;
3295
3296 case L2CAP_CONF_RSP:
3297 err = l2cap_config_rsp(conn, &cmd, data);
3298 break;
3299
3300 case L2CAP_DISCONN_REQ:
3301 err = l2cap_disconnect_req(conn, &cmd, data);
3302 break;
3303
3304 case L2CAP_DISCONN_RSP:
3305 err = l2cap_disconnect_rsp(conn, &cmd, data);
3306 break;
3307
3308 case L2CAP_ECHO_REQ:
/* Echo back the request payload verbatim */
3309 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3310 break;
3311
3312 case L2CAP_ECHO_RSP:
3313 break;
3314
3315 case L2CAP_INFO_REQ:
3316 err = l2cap_information_req(conn, &cmd, data);
3317 break;
3318
3319 case L2CAP_INFO_RSP:
3320 err = l2cap_information_rsp(conn, &cmd, data);
3321 break;
3322
3323 default:
3324 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3325 err = -EINVAL;
3326 break;
3327 }
3328
3329 if (err) {
3330 struct l2cap_cmd_rej rej;
3331 BT_DBG("error %d", err);
3332
3333 /* FIXME: Map err to a valid reason */
3334 rej.reason = cpu_to_le16(0);
3335 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3336 }
3337
3338 data += cmd_len;
3339 len -= cmd_len;
3340 }
3341
3342 kfree_skb(skb);
3343 }
3344
3345 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3346 {
3347 u16 our_fcs, rcv_fcs;
3348 int hdr_size = L2CAP_HDR_SIZE + 2;
3349
3350 if (pi->fcs == L2CAP_FCS_CRC16) {
3351 skb_trim(skb, skb->len - 2);
3352 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3353 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3354
3355 if (our_fcs != rcv_fcs)
3356 return -EINVAL;
3357 }
3358 return 0;
3359 }
3360
/*
 * After a poll/final exchange, acknowledge the peer: send an RNR if we
 * are locally busy, retransmit pending I-frames, and fall back to a
 * plain RR when nothing at all was sent (the F-bit still has to reach
 * the peer).
 */
3361 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3362 {
3363 struct l2cap_pinfo *pi = l2cap_pi(sk);
3364 u16 control = 0;
3365
3366 pi->frames_sent = 0;
/* The next frame we send must carry the F-bit */
3367 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3368
3369 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3370
3371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Busy: the F-bit goes out on this RNR, so clear SEND_FBIT */
3372 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3373 l2cap_send_sframe(pi, control);
3374 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3375 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3376 }
3377
3378 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3379 __mod_retrans_timer();
3380
3381 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3382
/* send_lock serializes transmission of queued I-frames */
3383 spin_lock_bh(&pi->send_lock);
3384 l2cap_ertm_send(sk);
3385 spin_unlock_bh(&pi->send_lock);
3386
/* Nothing was sent and we are not busy: answer with a bare RR */
3387 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3388 pi->frames_sent == 0) {
3389 control |= L2CAP_SUPER_RCV_READY;
3390 l2cap_send_sframe(pi, control);
3391 }
3392 }
3393
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq.  Returns 0 on success, -EINVAL when a frame
 * with the same tx_seq is already queued (duplicate).
 *
 * NOTE(review): the comparison uses raw tx_seq values, so ordering is
 * by absolute sequence number, not modulo-64 distance -- presumably
 * acceptable for the SREJ window; confirm against the callers.
 */
3394 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3395 {
3396 struct sk_buff *next_skb;
3397
/* Stash sequencing info in the skb control block for later replay */
3398 bt_cb(skb)->tx_seq = tx_seq;
3399 bt_cb(skb)->sar = sar;
3400
3401 next_skb = skb_peek(SREJ_QUEUE(sk));
3402 if (!next_skb) {
3403 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3404 return 0;
3405 }
3406
/* Walk the queue until we find the first frame with a larger tx_seq */
3407 do {
3408 if (bt_cb(next_skb)->tx_seq == tx_seq)
3409 return -EINVAL;
3410
3411 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3412 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3413 return 0;
3414 }
3415
3416 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3417 break;
3418
3419 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3420
/* Largest tx_seq seen so far: append at the end */
3421 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3422
3423 return 0;
3424 }
3425
/*
 * Reassemble an ERTM-mode SDU from one I-frame payload, driven by the
 * SAR bits in @control.  A violated SAR sequence drops the partial SDU
 * and disconnects the channel (drop: intentionally falls through to
 * disconnect:).  Consumes @skb on every path except the -ENOMEM /
 * receive-queue-full retry paths, where the caller requeues it.
 */
3426 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3427 {
3428 struct l2cap_pinfo *pi = l2cap_pi(sk);
3429 struct sk_buff *_skb;
3430 int err;
3431
3432 switch (control & L2CAP_CTRL_SAR) {
3433 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame must not interrupt an in-progress SDU */
3434 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3435 goto drop;
3436
3437 err = sock_queue_rcv_skb(sk, skb);
3438 if (!err)
3439 return err;
3440
3441 break;
3442
3443 case L2CAP_SDU_START:
3444 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3445 goto drop;
3446
/* First two payload bytes of a START frame carry the total SDU length */
3447 pi->sdu_len = get_unaligned_le16(skb->data);
3448
3449 if (pi->sdu_len > pi->imtu)
3450 goto disconnect;
3451
3452 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3453 if (!pi->sdu)
3454 return -ENOMEM;
3455
3456 /* pull sdu_len bytes only after alloc, because of Local Busy
3457 * condition we have to be sure that this will be executed
3458 * only once, i.e., when alloc does not fail */
3459 skb_pull(skb, 2);
3460
3461 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3462
3463 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3464 pi->partial_sdu_len = skb->len;
3465 break;
3466
3467 case L2CAP_SDU_CONTINUE:
3468 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3469 goto disconnect;
3470
3471 if (!pi->sdu)
3472 goto disconnect;
3473
3474 pi->partial_sdu_len += skb->len;
3475 if (pi->partial_sdu_len > pi->sdu_len)
3476 goto drop;
3477
3478 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3479
3480 break;
3481
3482 case L2CAP_SDU_END:
3483 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3484 goto disconnect;
3485
3486 if (!pi->sdu)
3487 goto disconnect;
3488
/* On a retry (previous delivery failed), the data was already copied */
3489 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3490 pi->partial_sdu_len += skb->len;
3491
3492 if (pi->partial_sdu_len > pi->imtu)
3493 goto drop;
3494
3495 if (pi->partial_sdu_len != pi->sdu_len)
3496 goto drop;
3497
3498 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3499 }
3500
3501 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3502 if (!_skb) {
/* SAR_RETRY makes the caller re-deliver this END frame later */
3503 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3504 return -ENOMEM;
3505 }
3506
3507 err = sock_queue_rcv_skb(sk, _skb);
3508 if (err < 0) {
3509 kfree_skb(_skb);
3510 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3511 return err;
3512 }
3513
3514 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3515 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3516
3517 kfree_skb(pi->sdu);
3518 break;
3519 }
3520
3521 kfree_skb(skb);
3522 return 0;
3523
3524 drop:
3525 kfree_skb(pi->sdu);
3526 pi->sdu = NULL;
3527
/* Note: drop deliberately falls through -- a SAR violation also
 * disconnects the channel. */
3528 disconnect:
3529 l2cap_send_disconn_req(pi->conn, sk);
3530 kfree_skb(skb);
3531 return 0;
3532 }
3533
/*
 * Workqueue handler for the local-busy condition: repeatedly try to
 * drain the BUSY queue into the socket receive queue, sleeping between
 * attempts.  When the queue empties (and an RNR was sent earlier), poll
 * the peer with an RR/P-bit and clear the busy state.  Gives up and
 * disconnects after L2CAP_LOCAL_BUSY_TRIES attempts.
 */
3534 static void l2cap_busy_work(struct work_struct *work)
3535 {
3536 DECLARE_WAITQUEUE(wait, current);
3537 struct l2cap_pinfo *pi =
3538 container_of(work, struct l2cap_pinfo, busy_work);
3539 struct sock *sk = (struct sock *)pi;
3540 int n_tries = 0, timeo = HZ/5, err;
3541 struct sk_buff *skb;
3542 u16 control;
3543
3544 lock_sock(sk);
3545
3546 add_wait_queue(sk_sleep(sk), &wait);
3547 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3548 set_current_state(TASK_INTERRUPTIBLE);
3549
3550 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3551 err = -EBUSY;
3552 l2cap_send_disconn_req(pi->conn, sk);
3553 goto done;
3554 }
3555
3556 if (!timeo)
3557 timeo = HZ/5;
3558
3559 if (signal_pending(current)) {
3560 err = sock_intr_errno(timeo);
3561 goto done;
3562 }
3563
/* Drop the sock lock while sleeping so the receive path can drain */
3564 release_sock(sk);
3565 timeo = schedule_timeout(timeo);
3566 lock_sock(sk);
3567
3568 err = sock_error(sk);
3569 if (err)
3570 goto done;
3571
/* Replay queued frames until one fails again (requeue it at the head) */
3572 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3573 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3574 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3575 if (err < 0) {
3576 skb_queue_head(BUSY_QUEUE(sk), skb);
3577 break;
3578 }
3579
/* ERTM sequence numbers are modulo 64 */
3580 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3581 }
3582
3583 if (!skb)
3584 break;
3585 }
3586
3587 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3588 goto done;
3589
/* We told the peer we were busy: poll it (RR with P-bit) and wait for
 * the F-bit under the monitor timer. */
3590 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3591 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3592 l2cap_send_sframe(pi, control);
3593 l2cap_pi(sk)->retry_count = 1;
3594
3595 del_timer(&pi->retrans_timer);
3596 __mod_monitor_timer();
3597
3598 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3599
3600 done:
3601 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3602 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3603
3604 set_current_state(TASK_RUNNING);
3605 remove_wait_queue(sk_sleep(sk), &wait);
3606
3607 release_sock(sk);
3608 }
3609
/*
 * Deliver a received ERTM I-frame towards the socket.  While locally
 * busy, frames are parked on the BUSY queue.  If delivery fails (e.g.
 * receive buffer full), enter the local-busy state: queue the frame,
 * send an RNR and schedule the busy worker to retry.
 */
3610 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3611 {
3612 struct l2cap_pinfo *pi = l2cap_pi(sk);
3613 int sctrl, err;
3614
3615 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Remember the SAR bits so the busy worker can rebuild the control field */
3616 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3617 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3618 return -EBUSY;
3619 }
3620
3621 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3622 if (err >= 0) {
/* ERTM sequence numbers are modulo 64 */
3623 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3624 return err;
3625 }
3626
3627 /* Busy Condition */
3628 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3629 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3630 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3631
/* Tell the peer to stop sending until the busy worker clears us */
3632 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3633 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3634 l2cap_send_sframe(pi, sctrl);
3635
3636 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3637
3638 queue_work(_busy_wq, &pi->busy_work);
3639
3640 return err;
3641 }
3642
3643 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3644 {
3645 struct l2cap_pinfo *pi = l2cap_pi(sk);
3646 struct sk_buff *_skb;
3647 int err = -EINVAL;
3648
3649 /*
3650 * TODO: We have to notify the userland if some data is lost with the
3651 * Streaming Mode.
3652 */
3653
3654 switch (control & L2CAP_CTRL_SAR) {
3655 case L2CAP_SDU_UNSEGMENTED:
3656 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3657 kfree_skb(pi->sdu);
3658 break;
3659 }
3660
3661 err = sock_queue_rcv_skb(sk, skb);
3662 if (!err)
3663 return 0;
3664
3665 break;
3666
3667 case L2CAP_SDU_START:
3668 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3669 kfree_skb(pi->sdu);
3670 break;
3671 }
3672
3673 pi->sdu_len = get_unaligned_le16(skb->data);
3674 skb_pull(skb, 2);
3675
3676 if (pi->sdu_len > pi->imtu) {
3677 err = -EMSGSIZE;
3678 break;
3679 }
3680
3681 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3682 if (!pi->sdu) {
3683 err = -ENOMEM;
3684 break;
3685 }
3686
3687 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3688
3689 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3690 pi->partial_sdu_len = skb->len;
3691 err = 0;
3692 break;
3693
3694 case L2CAP_SDU_CONTINUE:
3695 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3696 break;
3697
3698 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3699
3700 pi->partial_sdu_len += skb->len;
3701 if (pi->partial_sdu_len > pi->sdu_len)
3702 kfree_skb(pi->sdu);
3703 else
3704 err = 0;
3705
3706 break;
3707
3708 case L2CAP_SDU_END:
3709 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3710 break;
3711
3712 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3713
3714 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3715 pi->partial_sdu_len += skb->len;
3716
3717 if (pi->partial_sdu_len > pi->imtu)
3718 goto drop;
3719
3720 if (pi->partial_sdu_len == pi->sdu_len) {
3721 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3722 err = sock_queue_rcv_skb(sk, _skb);
3723 if (err < 0)
3724 kfree_skb(_skb);
3725 }
3726 err = 0;
3727
3728 drop:
3729 kfree_skb(pi->sdu);
3730 break;
3731 }
3732
3733 kfree_skb(skb);
3734 return err;
3735 }
3736
3737 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3738 {
3739 struct sk_buff *skb;
3740 u16 control;
3741
3742 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3743 if (bt_cb(skb)->tx_seq != tx_seq)
3744 break;
3745
3746 skb = skb_dequeue(SREJ_QUEUE(sk));
3747 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3748 l2cap_ertm_reassembly_sdu(sk, skb, control);
3749 l2cap_pi(sk)->buffer_seq_srej =
3750 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3751 tx_seq = (tx_seq + 1) % 64;
3752 }
3753 }
3754
3755 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3756 {
3757 struct l2cap_pinfo *pi = l2cap_pi(sk);
3758 struct srej_list *l, *tmp;
3759 u16 control;
3760
3761 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3762 if (l->tx_seq == tx_seq) {
3763 list_del(&l->list);
3764 kfree(l);
3765 return;
3766 }
3767 control = L2CAP_SUPER_SELECT_REJECT;
3768 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3769 l2cap_send_sframe(pi, control);
3770 list_del(&l->list);
3771 list_add_tail(&l->list, SREJ_LIST(sk));
3772 }
3773 }
3774
3775 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3776 {
3777 struct l2cap_pinfo *pi = l2cap_pi(sk);
3778 struct srej_list *new;
3779 u16 control;
3780
3781 while (tx_seq != pi->expected_tx_seq) {
3782 control = L2CAP_SUPER_SELECT_REJECT;
3783 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3784 l2cap_send_sframe(pi, control);
3785
3786 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3787 new->tx_seq = pi->expected_tx_seq;
3788 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3789 list_add_tail(&new->list, SREJ_LIST(sk));
3790 }
3791 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3792 }
3793
3794 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3795 {
3796 struct l2cap_pinfo *pi = l2cap_pi(sk);
3797 u8 tx_seq = __get_txseq(rx_control);
3798 u8 req_seq = __get_reqseq(rx_control);
3799 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3800 int tx_seq_offset, expected_tx_seq_offset;
3801 int num_to_ack = (pi->tx_win/6) + 1;
3802 int err = 0;
3803
3804 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3805
3806 if (L2CAP_CTRL_FINAL & rx_control &&
3807 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3808 del_timer(&pi->monitor_timer);
3809 if (pi->unacked_frames > 0)
3810 __mod_retrans_timer();
3811 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3812 }
3813
3814 pi->expected_ack_seq = req_seq;
3815 l2cap_drop_acked_frames(sk);
3816
3817 if (tx_seq == pi->expected_tx_seq)
3818 goto expected;
3819
3820 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3821 if (tx_seq_offset < 0)
3822 tx_seq_offset += 64;
3823
3824 /* invalid tx_seq */
3825 if (tx_seq_offset >= pi->tx_win) {
3826 l2cap_send_disconn_req(pi->conn, sk);
3827 goto drop;
3828 }
3829
3830 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3831 goto drop;
3832
3833 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3834 struct srej_list *first;
3835
3836 first = list_first_entry(SREJ_LIST(sk),
3837 struct srej_list, list);
3838 if (tx_seq == first->tx_seq) {
3839 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3840 l2cap_check_srej_gap(sk, tx_seq);
3841
3842 list_del(&first->list);
3843 kfree(first);
3844
3845 if (list_empty(SREJ_LIST(sk))) {
3846 pi->buffer_seq = pi->buffer_seq_srej;
3847 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3848 l2cap_send_ack(pi);
3849 }
3850 } else {
3851 struct srej_list *l;
3852
3853 /* duplicated tx_seq */
3854 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3855 goto drop;
3856
3857 list_for_each_entry(l, SREJ_LIST(sk), list) {
3858 if (l->tx_seq == tx_seq) {
3859 l2cap_resend_srejframe(sk, tx_seq);
3860 return 0;
3861 }
3862 }
3863 l2cap_send_srejframe(sk, tx_seq);
3864 }
3865 } else {
3866 expected_tx_seq_offset =
3867 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3868 if (expected_tx_seq_offset < 0)
3869 expected_tx_seq_offset += 64;
3870
3871 /* duplicated tx_seq */
3872 if (tx_seq_offset < expected_tx_seq_offset)
3873 goto drop;
3874
3875 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3876
3877 INIT_LIST_HEAD(SREJ_LIST(sk));
3878 pi->buffer_seq_srej = pi->buffer_seq;
3879
3880 __skb_queue_head_init(SREJ_QUEUE(sk));
3881 __skb_queue_head_init(BUSY_QUEUE(sk));
3882 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3883
3884 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3885
3886 l2cap_send_srejframe(sk, tx_seq);
3887 }
3888 return 0;
3889
3890 expected:
3891 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3892
3893 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3894 bt_cb(skb)->tx_seq = tx_seq;
3895 bt_cb(skb)->sar = sar;
3896 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3897 return 0;
3898 }
3899
3900 if (rx_control & L2CAP_CTRL_FINAL) {
3901 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3902 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3903 else
3904 l2cap_retransmit_frames(sk);
3905 }
3906
3907 err = l2cap_push_rx_skb(sk, skb, rx_control);
3908 if (err < 0)
3909 return 0;
3910
3911 __mod_ack_timer();
3912
3913 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3914 if (pi->num_acked == num_to_ack - 1)
3915 l2cap_send_ack(pi);
3916
3917 return 0;
3918
3919 drop:
3920 kfree_skb(skb);
3921 return 0;
3922 }
3923
/* Handle a Receiver-Ready (RR) S-frame: process the acknowledgement
 * and react to the P (poll) / F (final) bits.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	/* ReqSeq acknowledges our transmitted I-frames. */
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polls us: answer with the SREJ tail while in
		 * recovery, otherwise with an I-frame, RR or RNR carrying
		 * the F-bit. */
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answers our earlier poll; skip the retransmission
		 * if a REJ was already acted upon. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		/* Remote is ready again: acknowledge during SREJ recovery,
		 * otherwise resume transmitting queued frames. */
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			spin_lock_bh(&pi->send_lock);
			l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}
	}
}
3966
/* Handle a Reject (REJ) S-frame: the peer requests retransmission of
 * all frames starting at ReqSeq.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Frames below ReqSeq are implicitly acknowledged. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* If this REJ was already acted on while a poll was
		 * outstanding, do not retransmit a second time. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* Remember that we acted on this REJ so the eventual
		 * F-bit answer is not processed twice. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Select-Reject (SREJ) S-frame: the peer asks for the
 * retransmission of the single frame ReqSeq.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* P-bit SREJ also acknowledges up to ReqSeq. */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);
		l2cap_retransmit_one_frame(sk, tx_seq);

		spin_lock_bh(&pi->send_lock);
		l2cap_ertm_send(sk);
		spin_unlock_bh(&pi->send_lock);

		/* Remember the request while our own poll is outstanding
		 * so the F-bit answer is not retransmitted again. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit SREJ for a frame we already resent: just clear
		 * the saved state; otherwise resend. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4023
/* Handle a Receiver-Not-Ready (RNR) S-frame: the peer is busy, so stop
 * retransmitting until it reports ready again.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	/* ReqSeq still acknowledges our transmitted frames. */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Not in SREJ recovery: halt the retransmission timer and
		 * answer a poll with an F-bit RR/RNR. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	/* SREJ recovery in progress: keep requesting missing frames. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4045
4046 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4047 {
4048 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4049
4050 if (L2CAP_CTRL_FINAL & rx_control &&
4051 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4052 del_timer(&l2cap_pi(sk)->monitor_timer);
4053 if (l2cap_pi(sk)->unacked_frames > 0)
4054 __mod_retrans_timer();
4055 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4056 }
4057
4058 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4059 case L2CAP_SUPER_RCV_READY:
4060 l2cap_data_channel_rrframe(sk, rx_control);
4061 break;
4062
4063 case L2CAP_SUPER_REJECT:
4064 l2cap_data_channel_rejframe(sk, rx_control);
4065 break;
4066
4067 case L2CAP_SUPER_SELECT_REJECT:
4068 l2cap_data_channel_srejframe(sk, rx_control);
4069 break;
4070
4071 case L2CAP_SUPER_RCV_NOT_READY:
4072 l2cap_data_channel_rnrframe(sk, rx_control);
4073 break;
4074 }
4075
4076 kfree_skb(skb);
4077 return 0;
4078 }
4079
/* Dispatch an L2CAP PDU received on a connection-oriented channel.
 *
 * Looks up the channel by CID and handles the payload according to the
 * channel mode (Basic, ERTM or Streaming). Consumes the skb on every
 * path and always returns 0. The socket returned by
 * l2cap_get_chan_by_scid() is locked and is unlocked before returning.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq, req_seq;
	int next_tx_seq_offset, req_seq_offset;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Strip the 16-bit control field, then compute the pure
		 * payload length (minus the SDU-length field of an SDU
		 * start I-frame and the FCS, when present).
		 * NOTE(review): len is u16 and skb->len is not verified to
		 * cover these fields first, so a short frame could make
		 * the subtractions wrap -- confirm minimum-length
		 * guarantees in the callers. */
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control) && __is_iframe(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* The piggybacked ReqSeq must acknowledge a frame we have
		 * actually sent (modulo-64 window check). */
		req_seq = __get_reqseq(control);
		req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
		if (req_seq_offset < 0)
			req_seq_offset += 64;

		next_tx_seq_offset =
			(pi->next_tx_seq - pi->expected_ack_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* check for invalid req-seq */
		if (req_seq_offset > next_tx_seq_offset) {
			l2cap_send_disconn_req(pi->conn, sk);
			goto drop;
		}

		if (__is_iframe(control)) {
			if (len < 4) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_iframe(sk, control, skb);
		} else {
			/* S-frames must not carry any payload. */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk);
				goto drop;
			}

			l2cap_data_channel_sframe(sk, control, skb);
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* Streaming mode accepts only sanely-sized I-frames. */
		if (len > pi->mps || len < 4 || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* No retransmission in streaming mode: on loss simply
		 * resynchronize the expected sequence number. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4215
4216 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4217 {
4218 struct sock *sk;
4219
4220 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4221 if (!sk)
4222 goto drop;
4223
4224 BT_DBG("sk %p, len %d", sk, skb->len);
4225
4226 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4227 goto drop;
4228
4229 if (l2cap_pi(sk)->imtu < skb->len)
4230 goto drop;
4231
4232 if (!sock_queue_rcv_skb(sk, skb))
4233 goto done;
4234
4235 drop:
4236 kfree_skb(skb);
4237
4238 done:
4239 if (sk)
4240 bh_unlock_sock(sk);
4241 return 0;
4242 }
4243
4244 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4245 {
4246 struct l2cap_hdr *lh = (void *) skb->data;
4247 u16 cid, len;
4248 __le16 psm;
4249
4250 skb_pull(skb, L2CAP_HDR_SIZE);
4251 cid = __le16_to_cpu(lh->cid);
4252 len = __le16_to_cpu(lh->len);
4253
4254 if (len != skb->len) {
4255 kfree_skb(skb);
4256 return;
4257 }
4258
4259 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4260
4261 switch (cid) {
4262 case L2CAP_CID_SIGNALING:
4263 l2cap_sig_channel(conn, skb);
4264 break;
4265
4266 case L2CAP_CID_CONN_LESS:
4267 psm = get_unaligned_le16(skb->data);
4268 skb_pull(skb, 2);
4269 l2cap_conless_channel(conn, psm, skb);
4270 break;
4271
4272 default:
4273 l2cap_data_channel(conn, cid, skb);
4274 break;
4275 }
4276 }
4277
4278 /* ---- L2CAP interface with lower layer (HCI) ---- */
4279
4280 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4281 {
4282 int exact = 0, lm1 = 0, lm2 = 0;
4283 register struct sock *sk;
4284 struct hlist_node *node;
4285
4286 if (type != ACL_LINK)
4287 return 0;
4288
4289 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4290
4291 /* Find listening sockets and check their link_mode */
4292 read_lock(&l2cap_sk_list.lock);
4293 sk_for_each(sk, node, &l2cap_sk_list.head) {
4294 if (sk->sk_state != BT_LISTEN)
4295 continue;
4296
4297 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4298 lm1 |= HCI_LM_ACCEPT;
4299 if (l2cap_pi(sk)->role_switch)
4300 lm1 |= HCI_LM_MASTER;
4301 exact++;
4302 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4303 lm2 |= HCI_LM_ACCEPT;
4304 if (l2cap_pi(sk)->role_switch)
4305 lm2 |= HCI_LM_MASTER;
4306 }
4307 }
4308 read_unlock(&l2cap_sk_list.lock);
4309
4310 return exact ? lm1 : lm2;
4311 }
4312
4313 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4314 {
4315 struct l2cap_conn *conn;
4316
4317 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4318
4319 if (hcon->type != ACL_LINK)
4320 return 0;
4321
4322 if (!status) {
4323 conn = l2cap_conn_add(hcon, status);
4324 if (conn)
4325 l2cap_conn_ready(conn);
4326 } else
4327 l2cap_conn_del(hcon, bt_err(status));
4328
4329 return 0;
4330 }
4331
4332 static int l2cap_disconn_ind(struct hci_conn *hcon)
4333 {
4334 struct l2cap_conn *conn = hcon->l2cap_data;
4335
4336 BT_DBG("hcon %p", hcon);
4337
4338 if (hcon->type != ACL_LINK || !conn)
4339 return 0x13;
4340
4341 return conn->disc_reason;
4342 }
4343
4344 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4345 {
4346 BT_DBG("hcon %p reason %d", hcon, reason);
4347
4348 if (hcon->type != ACL_LINK)
4349 return 0;
4350
4351 l2cap_conn_del(hcon, bt_err(reason));
4352
4353 return 0;
4354 }
4355
4356 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4357 {
4358 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4359 return;
4360
4361 if (encrypt == 0x00) {
4362 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4363 l2cap_sock_clear_timer(sk);
4364 l2cap_sock_set_timer(sk, HZ * 5);
4365 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4366 __l2cap_sock_close(sk, ECONNREFUSED);
4367 } else {
4368 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4369 l2cap_sock_clear_timer(sk);
4370 }
4371 }
4372
/* HCI callback: a security (authentication/encryption) procedure on
 * the ACL link completed with the given status.
 *
 * Walks every channel on the connection and advances its state:
 * connected/config channels re-check encryption, connecting channels
 * send the deferred Connect Request, and incoming channels answer with
 * a Connect Response (success or security block).
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* A connect is already pending for this channel; leave it
		 * to the connect completion path. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred
				 * Connect Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: shut the socket down
				 * shortly via its timer. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection waiting on security: answer
			 * the peer's Connect Request accordingly. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4446
/* HCI callback: ACL data arrived for this connection.
 *
 * An L2CAP PDU may span several ACL packets: an ACL_START packet
 * carries the L2CAP header announcing the total length, continuation
 * packets fill the remainder. Fragments are accumulated in
 * conn->rx_skb until the announced length is reached, then the full
 * frame is dispatched via l2cap_recv_frame(). The incoming skb is
 * always consumed. Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A previous PDU was left unfinished: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the length field of the L2CAP header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the announced PDU length: reset the
		 * reassembly state entirely. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4534
4535 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4536 {
4537 struct sock *sk;
4538 struct hlist_node *node;
4539
4540 read_lock_bh(&l2cap_sk_list.lock);
4541
4542 sk_for_each(sk, node, &l2cap_sk_list.head) {
4543 struct l2cap_pinfo *pi = l2cap_pi(sk);
4544
4545 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4546 batostr(&bt_sk(sk)->src),
4547 batostr(&bt_sk(sk)->dst),
4548 sk->sk_state, __le16_to_cpu(pi->psm),
4549 pi->scid, pi->dcid,
4550 pi->imtu, pi->omtu, pi->sec_level);
4551 }
4552
4553 read_unlock_bh(&l2cap_sk_list.lock);
4554
4555 return 0;
4556 }
4557
/* debugfs open hook: bind the seq_file show routine to this file. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4562
/* File hooks for the "l2cap" debugfs socket dump. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4569
4570 static struct dentry *l2cap_debugfs;
4571
/* Socket operations for L2CAP sockets: L2CAP-specific handlers plus
 * the generic Bluetooth helpers for poll/ioctl and stubs for the
 * operations L2CAP does not support (mmap, socketpair). */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4591
/* PF_BLUETOOTH socket-creation hook for BTPROTO_L2CAP. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4597
/* Registration with the HCI core: callbacks for connection lifetime,
 * security events and incoming ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4608
4609 static int __init l2cap_init(void)
4610 {
4611 int err;
4612
4613 err = proto_register(&l2cap_proto, 0);
4614 if (err < 0)
4615 return err;
4616
4617 _busy_wq = create_singlethread_workqueue("l2cap");
4618 if (!_busy_wq)
4619 goto error;
4620
4621 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4622 if (err < 0) {
4623 BT_ERR("L2CAP socket registration failed");
4624 goto error;
4625 }
4626
4627 err = hci_register_proto(&l2cap_hci_proto);
4628 if (err < 0) {
4629 BT_ERR("L2CAP protocol registration failed");
4630 bt_sock_unregister(BTPROTO_L2CAP);
4631 goto error;
4632 }
4633
4634 if (bt_debugfs) {
4635 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4636 bt_debugfs, NULL, &l2cap_debugfs_fops);
4637 if (!l2cap_debugfs)
4638 BT_ERR("Failed to create L2CAP debug file");
4639 }
4640
4641 BT_INFO("L2CAP ver %s", VERSION);
4642 BT_INFO("L2CAP socket layer initialized");
4643
4644 return 0;
4645
4646 error:
4647 proto_unregister(&l2cap_proto);
4648 return err;
4649 }
4650
/* Module teardown: remove the debugfs entry, drain and destroy the
 * busy workqueue, then unregister the socket family, the HCI protocol
 * hooks and the proto. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Make sure no busy work is still running before destruction. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4666
/* Dummy exported function: lets other modules that only use L2CAP
 * sockets (and no other symbol from this module) trigger automatic
 * loading of the L2CAP module via this symbol dependency. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4674
module_init(l2cap_init);
module_exit(l2cap_exit);

/* ERTM support toggle; mode 0644 makes it writable at runtime via
 * /sys/module parameters. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");