]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - net/bluetooth/l2cap.c
Bluetooth: Implement RejActioned flag
[mirror_ubuntu-eoan-kernel.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
45 #include <net/sock.h>
46
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
49
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
53
54 #define VERSION "2.14"
55
56 static int enable_ertm = 0;
57
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
60
61 static const struct proto_ops l2cap_sock_ops;
62
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
65 };
66
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
70
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73
74 /* ---- L2CAP timers ---- */
/* Socket timer expiry handler (runs in timer/BH context).
 * Chooses an error code based on the connection phase, closes the
 * channel, then reaps the socket and drops the timer's reference. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* kill must run on the unlocked socket */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
99
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 {
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
104 }
105
106 static void l2cap_sock_clear_timer(struct sock *sk)
107 {
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
110 }
111
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
114 {
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
119 }
120 return s;
121 }
122
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
124 {
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
129 }
130 return s;
131 }
132
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 {
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s)
141 bh_lock_sock(s);
142 read_unlock(&l->lock);
143 return s;
144 }
145
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
147 {
148 struct sock *s;
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
151 break;
152 }
153 return s;
154 }
155
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 {
158 struct sock *s;
159 read_lock(&l->lock);
160 s = __l2cap_get_chan_by_ident(l, ident);
161 if (s)
162 bh_lock_sock(s);
163 read_unlock(&l->lock);
164 return s;
165 }
166
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 {
169 u16 cid = L2CAP_CID_DYN_START;
170
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
173 return cid;
174 }
175
176 return 0;
177 }
178
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
180 {
181 sock_hold(sk);
182
183 if (l->head)
184 l2cap_pi(l->head)->prev_c = sk;
185
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
188 l->head = sk;
189 }
190
/* Remove @sk from its connection's channel list and drop the list's
 * reference.  Takes write_lock_bh to exclude readers in BH context. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: the caller is expected to still hold its own ref */
	__sock_put(sk);
}
207
/* Attach @sk to @conn: assign CIDs according to socket type, link it
 * into the connection's channel list and, for incoming channels,
 * queue it on the listening parent.  Caller holds the list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason (same value set in
	 * l2cap_conn_add) — presumably "remote user terminated";
	 * TODO confirm against the HCI error code table */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
239
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Drop the ACL connection reference taken for this channel */
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: remove from accept queue and wake the
		 * listener so it can reap us */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
270
/* Service level security */
/* Map the channel's security level (and PSM) onto an HCI
 * authentication type and ask the HCI layer to enforce it.
 * Returns the result of hci_conn_security() (non-zero when the link
 * already satisfies the requirement — TODO confirm exact contract). */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	/* PSM 0x0001 is SDP: never requires bonding */
	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
302
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
304 {
305 u8 id;
306
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
311 */
312
313 spin_lock_bh(&conn->lock);
314
315 if (++conn->tx_ident > 128)
316 conn->tx_ident = 1;
317
318 id = conn->tx_ident;
319
320 spin_unlock_bh(&conn->lock);
321
322 return id;
323 }
324
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 {
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328
329 BT_DBG("code 0x%2.2x", code);
330
331 if (!skb)
332 return -ENOMEM;
333
334 return hci_send_acl(conn->hcon, skb, 0);
335 }
336
/* Build and send an ERTM supervisory frame (S-frame) carrying the
 * given control field, with optional FCS depending on channel mode. */
static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	/* S-frame = basic header + 2-byte control field */
	int count, hlen = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	return hci_send_acl(pi->conn->hcon, skb, 0);
}
368
369 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
370 {
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
373 else
374 control |= L2CAP_SUPER_RCV_READY;
375
376 return l2cap_send_sframe(pi, control);
377 }
378
/* Kick off channel establishment: send a Connection Request once the
 * feature-mask exchange has completed and security is satisfied,
 * otherwise start the Information Request handshake first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in flight: l2cap_conn_start() will pick this
		 * channel up when the response (or timeout) arrives */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Guard against a peer that never answers */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
411
412 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
413 {
414 struct l2cap_disconn_req req;
415
416 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
417 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
418 l2cap_send_cmd(conn, l2cap_get_ident(conn),
419 L2CAP_DISCONN_REQ, sizeof(req), &req);
420 }
421
/* ---- L2CAP connections ---- */
/* Walk all channels of @conn and advance every connection-oriented
 * socket that was waiting for the feature-mask exchange: outgoing
 * BT_CONNECT channels get a Connection Request, incoming BT_CONNECT2
 * channels get their (possibly deferred) Connection Response. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented channels take part */
		if (sk->sk_type != SOCK_SEQPACKET) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept first: report
					 * pending and wake the listener */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
482
/* The underlying ACL link came up: raw/datagram sockets become
 * connected immediately; connection-oriented sockets start the L2CAP
 * channel establishment. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
507
508 /* Notify sockets that we cannot guaranty reliability anymore */
509 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
510 {
511 struct l2cap_chan_list *l = &conn->chan_list;
512 struct sock *sk;
513
514 BT_DBG("conn %p", conn);
515
516 read_lock(&l->lock);
517
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
519 if (l2cap_pi(sk)->force_reliable)
520 sk->sk_err = err;
521 }
522
523 read_unlock(&l->lock);
524 }
525
526 static void l2cap_info_timeout(unsigned long arg)
527 {
528 struct l2cap_conn *conn = (void *) arg;
529
530 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
531 conn->info_ident = 0;
532
533 l2cap_conn_start(conn);
534 }
535
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Returns NULL on allocation failure or when @status is
 * non-zero with no existing connection. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: default disconnect reason — presumably "remote user
	 * terminated"; TODO confirm against the HCI error code table */
	conn->disc_reason = 0x13;

	return conn;
}
568
/* Tear down an L2CAP connection: free any partially reassembled frame,
 * delete and reap every channel, stop the info timer and release the
 * connection object itself. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* kfree_skb(NULL) is a no-op, so no guard needed */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		/* kill must run on the unlocked socket */
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
595
596 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
597 {
598 struct l2cap_chan_list *l = &conn->chan_list;
599 write_lock_bh(&l->lock);
600 __l2cap_chan_add(conn, sk, parent);
601 write_unlock_bh(&l->lock);
602 }
603
604 /* ---- Socket interface ---- */
605 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
606 {
607 struct sock *sk;
608 struct hlist_node *node;
609 sk_for_each(sk, node, &l2cap_sk_list.head)
610 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
611 goto found;
612 sk = NULL;
613 found:
614 return sk;
615 }
616
617 /* Find socket with psm and source bdaddr.
618 * Returns closest match.
619 */
620 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
621 {
622 struct sock *sk = NULL, *sk1 = NULL;
623 struct hlist_node *node;
624
625 sk_for_each(sk, node, &l2cap_sk_list.head) {
626 if (state && sk->sk_state != state)
627 continue;
628
629 if (l2cap_pi(sk)->psm == psm) {
630 /* Exact match. */
631 if (!bacmp(&bt_sk(sk)->src, src))
632 break;
633
634 /* Closest match */
635 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
636 sk1 = sk;
637 }
638 }
639 return node ? sk : sk1;
640 }
641
642 /* Find socket with given address (psm, src).
643 * Returns locked socket */
644 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
645 {
646 struct sock *s;
647 read_lock(&l2cap_sk_list.lock);
648 s = __l2cap_get_sock_by_psm(state, psm, src);
649 if (s)
650 bh_lock_sock(s);
651 read_unlock(&l2cap_sk_list.lock);
652 return s;
653 }
654
655 static void l2cap_sock_destruct(struct sock *sk)
656 {
657 BT_DBG("sk %p", sk);
658
659 skb_queue_purge(&sk->sk_receive_queue);
660 skb_queue_purge(&sk->sk_write_queue);
661 }
662
663 static void l2cap_sock_cleanup_listen(struct sock *parent)
664 {
665 struct sock *sk;
666
667 BT_DBG("parent %p", parent);
668
669 /* Close not yet accepted channels */
670 while ((sk = bt_accept_dequeue(parent, NULL)))
671 l2cap_sock_close(sk);
672
673 parent->sk_state = BT_CLOSED;
674 sock_set_flag(parent, SOCK_ZAPPED);
675 }
676
677 /* Kill socket (only if zapped and orphan)
678 * Must be called on unlocked socket.
679 */
680 static void l2cap_sock_kill(struct sock *sk)
681 {
682 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
683 return;
684
685 BT_DBG("sk %p state %d", sk, sk->sk_state);
686
687 /* Kill poor orphan */
688 bt_sock_unlink(&l2cap_sk_list, sk);
689 sock_set_flag(sk, SOCK_DEAD);
690 sock_put(sk);
691 }
692
/* Close a socket according to its state.  Connected channels get a
 * Disconnection Request and wait in BT_DISCONN; a pending incoming
 * channel (BT_CONNECT2) is refused with an error response.
 * Caller holds the socket lock. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Graceful disconnect: wait (bounded by the socket
			 * timer) for the peer's Disconnection Response */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* defer_setup means userspace declined the channel;
			 * report it as a security block */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
745
746 /* Must be called on unlocked socket. */
747 static void l2cap_sock_close(struct sock *sk)
748 {
749 l2cap_sock_clear_timer(sk);
750 lock_sock(sk);
751 __l2cap_sock_close(sk, ECONNRESET);
752 release_sock(sk);
753 l2cap_sock_kill(sk);
754 }
755
/* Initialise a freshly allocated L2CAP socket.  Child sockets created
 * by an incoming connection inherit the listening parent's settings;
 * otherwise sane defaults are used. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs  = l2cap_pi(parent)->fcs;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->mode = L2CAP_MODE_BASIC;
		pi->fcs  = L2CAP_FCS_CRC16;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
790
/* Protocol descriptor; obj_size makes every socket carry l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
796
/* Allocate and minimally initialise an L2CAP sock, link it into the
 * global socket list and arm its timeout handler.
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* Default connect/disconnect timeout */
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
821
/* socket(2) backend: validate the socket type, enforce CAP_NET_RAW for
 * raw sockets created from userspace, then allocate and initialise. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	/* Raw sockets see signalling traffic: privileged only */
	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
847
/* bind(2) backend: record the source address and PSM after checking
 * permissions and PSM availability. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs are zero-padded */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Binding to a fixed CID is not supported */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	/* PSMs below 0x1001 are reserved; require privilege */
	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) need no extra security */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
901
/* Establish (or reuse) an ACL link to the destination and attach this
 * channel to it.  Picks the HCI authentication type from the socket
 * type, PSM and security level, then starts channel setup if the link
 * is already up.  Returns 0 on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	/* NOTE(review): both failure paths below report -ENOMEM even when
	 * the cause was not allocation — matches the existing convention */
	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* SDP never requires bonding */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
991
/* connect(2) backend: validate the address and channel mode, start the
 * connection if needed and (unless non-blocking) wait for BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs are zero-padded */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming modes are gated behind the enable_ertm param */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1066
/* listen(2) backend: auto-assign a dynamic PSM when none was bound,
 * then move the socket to BT_LISTEN. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	/* ERTM/streaming modes are gated behind the enable_ertm param */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Dynamic PSMs are odd values in 0x1001..0x10ff */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1124
/* accept(2) backend: block (subject to the receive timeout) until an
 * incoming channel is ready on the listening socket's accept queue. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the socket lock while sleeping so the BH side can
		 * queue new connections */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Listener may have been closed while we slept */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1180
1181 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1182 {
1183 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1184 struct sock *sk = sock->sk;
1185
1186 BT_DBG("sock %p, sk %p", sock, sk);
1187
1188 addr->sa_family = AF_BLUETOOTH;
1189 *len = sizeof(struct sockaddr_l2);
1190
1191 if (peer) {
1192 la->l2_psm = l2cap_pi(sk)->psm;
1193 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1194 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1195 } else {
1196 la->l2_psm = l2cap_pi(sk)->sport;
1197 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1198 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1199 }
1200
1201 return 0;
1202 }
1203
1204 static void l2cap_monitor_timeout(unsigned long arg)
1205 {
1206 struct sock *sk = (void *) arg;
1207 u16 control;
1208
1209 bh_lock_sock(sk);
1210 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1211 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1212 return;
1213 }
1214
1215 l2cap_pi(sk)->retry_count++;
1216 __mod_monitor_timer();
1217
1218 control = L2CAP_CTRL_POLL;
1219 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1220 bh_unlock_sock(sk);
1221 }
1222
/* ERTM retransmission timer: an I-frame went unacknowledged, so enter
 * the WAIT_F state, start the monitor timer and poll the peer. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;
	u16 control;

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Wait for an F-bit response before sending more I-frames */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	control = L2CAP_CTRL_POLL;
	l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
	bh_unlock_sock(sk);
}
1238
1239 static void l2cap_drop_acked_frames(struct sock *sk)
1240 {
1241 struct sk_buff *skb;
1242
1243 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1244 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1245 break;
1246
1247 skb = skb_dequeue(TX_QUEUE(sk));
1248 kfree_skb(skb);
1249
1250 l2cap_pi(sk)->unacked_frames--;
1251 }
1252
1253 if (!l2cap_pi(sk)->unacked_frames)
1254 del_timer(&l2cap_pi(sk)->retrans_timer);
1255
1256 return;
1257 }
1258
1259 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1260 {
1261 struct l2cap_pinfo *pi = l2cap_pi(sk);
1262 int err;
1263
1264 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1265
1266 err = hci_send_acl(pi->conn->hcon, skb, 0);
1267 if (err < 0)
1268 kfree_skb(skb);
1269
1270 return err;
1271 }
1272
1273 static int l2cap_streaming_send(struct sock *sk)
1274 {
1275 struct sk_buff *skb, *tx_skb;
1276 struct l2cap_pinfo *pi = l2cap_pi(sk);
1277 u16 control, fcs;
1278 int err;
1279
1280 while ((skb = sk->sk_send_head)) {
1281 tx_skb = skb_clone(skb, GFP_ATOMIC);
1282
1283 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1284 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1285 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1286
1287 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1288 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1289 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1290 }
1291
1292 err = l2cap_do_send(sk, tx_skb);
1293 if (err < 0) {
1294 l2cap_send_disconn_req(pi->conn, sk);
1295 return err;
1296 }
1297
1298 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1299
1300 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1301 sk->sk_send_head = NULL;
1302 else
1303 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1304
1305 skb = skb_dequeue(TX_QUEUE(sk));
1306 kfree_skb(skb);
1307 }
1308 return 0;
1309 }
1310
1311 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1312 {
1313 struct l2cap_pinfo *pi = l2cap_pi(sk);
1314 struct sk_buff *skb, *tx_skb;
1315 u16 control, fcs;
1316 int err;
1317
1318 skb = skb_peek(TX_QUEUE(sk));
1319 do {
1320 if (bt_cb(skb)->tx_seq != tx_seq) {
1321 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1322 break;
1323 skb = skb_queue_next(TX_QUEUE(sk), skb);
1324 continue;
1325 }
1326
1327 if (pi->remote_max_tx &&
1328 bt_cb(skb)->retries == pi->remote_max_tx) {
1329 l2cap_send_disconn_req(pi->conn, sk);
1330 break;
1331 }
1332
1333 tx_skb = skb_clone(skb, GFP_ATOMIC);
1334 bt_cb(skb)->retries++;
1335 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1336 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1337 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1338 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1339
1340 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1341 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1342 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1343 }
1344
1345 err = l2cap_do_send(sk, tx_skb);
1346 if (err < 0) {
1347 l2cap_send_disconn_req(pi->conn, sk);
1348 return err;
1349 }
1350 break;
1351 } while(1);
1352 return 0;
1353 }
1354
1355 static int l2cap_ertm_send(struct sock *sk)
1356 {
1357 struct sk_buff *skb, *tx_skb;
1358 struct l2cap_pinfo *pi = l2cap_pi(sk);
1359 u16 control, fcs;
1360 int err;
1361
1362 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1363 return 0;
1364
1365 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1366 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1367 tx_skb = skb_clone(skb, GFP_ATOMIC);
1368
1369 if (pi->remote_max_tx &&
1370 bt_cb(skb)->retries == pi->remote_max_tx) {
1371 l2cap_send_disconn_req(pi->conn, sk);
1372 break;
1373 }
1374
1375 bt_cb(skb)->retries++;
1376
1377 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1378 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1379 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1380 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1381
1382
1383 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1384 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1385 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1386 }
1387
1388 err = l2cap_do_send(sk, tx_skb);
1389 if (err < 0) {
1390 l2cap_send_disconn_req(pi->conn, sk);
1391 return err;
1392 }
1393 __mod_retrans_timer();
1394
1395 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1396 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1397
1398 pi->unacked_frames++;
1399
1400 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1401 sk->sk_send_head = NULL;
1402 else
1403 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1404 }
1405
1406 return 0;
1407 }
1408
1409 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1410 {
1411 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1412 struct sk_buff **frag;
1413 int err, sent = 0;
1414
1415 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1416 return -EFAULT;
1417 }
1418
1419 sent += count;
1420 len -= count;
1421
1422 /* Continuation fragments (no L2CAP header) */
1423 frag = &skb_shinfo(skb)->frag_list;
1424 while (len) {
1425 count = min_t(unsigned int, conn->mtu, len);
1426
1427 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1428 if (!*frag)
1429 return -EFAULT;
1430 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1431 return -EFAULT;
1432
1433 sent += count;
1434 len -= count;
1435
1436 frag = &(*frag)->next;
1437 }
1438
1439 return sent;
1440 }
1441
1442 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1443 {
1444 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1445 struct sk_buff *skb;
1446 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1447 struct l2cap_hdr *lh;
1448
1449 BT_DBG("sk %p len %d", sk, (int)len);
1450
1451 count = min_t(unsigned int, (conn->mtu - hlen), len);
1452 skb = bt_skb_send_alloc(sk, count + hlen,
1453 msg->msg_flags & MSG_DONTWAIT, &err);
1454 if (!skb)
1455 return ERR_PTR(-ENOMEM);
1456
1457 /* Create L2CAP header */
1458 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1459 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1460 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1461 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1462
1463 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1464 if (unlikely(err < 0)) {
1465 kfree_skb(skb);
1466 return ERR_PTR(err);
1467 }
1468 return skb;
1469 }
1470
1471 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1472 {
1473 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1474 struct sk_buff *skb;
1475 int err, count, hlen = L2CAP_HDR_SIZE;
1476 struct l2cap_hdr *lh;
1477
1478 BT_DBG("sk %p len %d", sk, (int)len);
1479
1480 count = min_t(unsigned int, (conn->mtu - hlen), len);
1481 skb = bt_skb_send_alloc(sk, count + hlen,
1482 msg->msg_flags & MSG_DONTWAIT, &err);
1483 if (!skb)
1484 return ERR_PTR(-ENOMEM);
1485
1486 /* Create L2CAP header */
1487 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1488 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1489 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1490
1491 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1492 if (unlikely(err < 0)) {
1493 kfree_skb(skb);
1494 return ERR_PTR(err);
1495 }
1496 return skb;
1497 }
1498
1499 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1500 {
1501 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1502 struct sk_buff *skb;
1503 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1504 struct l2cap_hdr *lh;
1505
1506 BT_DBG("sk %p len %d", sk, (int)len);
1507
1508 if (sdulen)
1509 hlen += 2;
1510
1511 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1512 hlen += 2;
1513
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1517 if (!skb)
1518 return ERR_PTR(-ENOMEM);
1519
1520 /* Create L2CAP header */
1521 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1522 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1523 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1524 put_unaligned_le16(control, skb_put(skb, 2));
1525 if (sdulen)
1526 put_unaligned_le16(sdulen, skb_put(skb, 2));
1527
1528 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1529 if (unlikely(err < 0)) {
1530 kfree_skb(skb);
1531 return ERR_PTR(err);
1532 }
1533
1534 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1535 put_unaligned_le16(0, skb_put(skb, 2));
1536
1537 bt_cb(skb)->retries = 0;
1538 return skb;
1539 }
1540
1541 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1542 {
1543 struct l2cap_pinfo *pi = l2cap_pi(sk);
1544 struct sk_buff *skb;
1545 struct sk_buff_head sar_queue;
1546 u16 control;
1547 size_t size = 0;
1548
1549 __skb_queue_head_init(&sar_queue);
1550 control = L2CAP_SDU_START;
1551 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1552 if (IS_ERR(skb))
1553 return PTR_ERR(skb);
1554
1555 __skb_queue_tail(&sar_queue, skb);
1556 len -= pi->max_pdu_size;
1557 size +=pi->max_pdu_size;
1558 control = 0;
1559
1560 while (len > 0) {
1561 size_t buflen;
1562
1563 if (len > pi->max_pdu_size) {
1564 control |= L2CAP_SDU_CONTINUE;
1565 buflen = pi->max_pdu_size;
1566 } else {
1567 control |= L2CAP_SDU_END;
1568 buflen = len;
1569 }
1570
1571 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1572 if (IS_ERR(skb)) {
1573 skb_queue_purge(&sar_queue);
1574 return PTR_ERR(skb);
1575 }
1576
1577 __skb_queue_tail(&sar_queue, skb);
1578 len -= buflen;
1579 size += buflen;
1580 control = 0;
1581 }
1582 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1583 if (sk->sk_send_head == NULL)
1584 sk->sk_send_head = sar_queue.next;
1585
1586 return size;
1587 }
1588
1589 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1590 {
1591 struct sock *sk = sock->sk;
1592 struct l2cap_pinfo *pi = l2cap_pi(sk);
1593 struct sk_buff *skb;
1594 u16 control;
1595 int err;
1596
1597 BT_DBG("sock %p, sk %p", sock, sk);
1598
1599 err = sock_error(sk);
1600 if (err)
1601 return err;
1602
1603 if (msg->msg_flags & MSG_OOB)
1604 return -EOPNOTSUPP;
1605
1606 /* Check outgoing MTU */
1607 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
1608 len > pi->omtu)
1609 return -EINVAL;
1610
1611 lock_sock(sk);
1612
1613 if (sk->sk_state != BT_CONNECTED) {
1614 err = -ENOTCONN;
1615 goto done;
1616 }
1617
1618 /* Connectionless channel */
1619 if (sk->sk_type == SOCK_DGRAM) {
1620 skb = l2cap_create_connless_pdu(sk, msg, len);
1621 err = l2cap_do_send(sk, skb);
1622 goto done;
1623 }
1624
1625 switch (pi->mode) {
1626 case L2CAP_MODE_BASIC:
1627 /* Create a basic PDU */
1628 skb = l2cap_create_basic_pdu(sk, msg, len);
1629 if (IS_ERR(skb)) {
1630 err = PTR_ERR(skb);
1631 goto done;
1632 }
1633
1634 err = l2cap_do_send(sk, skb);
1635 if (!err)
1636 err = len;
1637 break;
1638
1639 case L2CAP_MODE_ERTM:
1640 case L2CAP_MODE_STREAMING:
1641 /* Entire SDU fits into one PDU */
1642 if (len <= pi->max_pdu_size) {
1643 control = L2CAP_SDU_UNSEGMENTED;
1644 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1645 if (IS_ERR(skb)) {
1646 err = PTR_ERR(skb);
1647 goto done;
1648 }
1649 __skb_queue_tail(TX_QUEUE(sk), skb);
1650 if (sk->sk_send_head == NULL)
1651 sk->sk_send_head = skb;
1652 } else {
1653 /* Segment SDU into multiples PDUs */
1654 err = l2cap_sar_segment_sdu(sk, msg, len);
1655 if (err < 0)
1656 goto done;
1657 }
1658
1659 if (pi->mode == L2CAP_MODE_STREAMING)
1660 err = l2cap_streaming_send(sk);
1661 else
1662 err = l2cap_ertm_send(sk);
1663
1664 if (!err)
1665 err = len;
1666 break;
1667
1668 default:
1669 BT_DBG("bad state %1.1x", pi->mode);
1670 err = -EINVAL;
1671 }
1672
1673 done:
1674 release_sock(sk);
1675 return err;
1676 }
1677
1678 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1679 {
1680 struct sock *sk = sock->sk;
1681
1682 lock_sock(sk);
1683
1684 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1685 struct l2cap_conn_rsp rsp;
1686
1687 sk->sk_state = BT_CONFIG;
1688
1689 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1690 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1691 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1692 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1693 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1694 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1695
1696 release_sock(sk);
1697 return 0;
1698 }
1699
1700 release_sock(sk);
1701
1702 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1703 }
1704
/* Legacy SOL_L2CAP setsockopt handler.
 * L2CAP_OPTIONS sets MTUs, mode and FCS; L2CAP_LM maps link-mode bits
 * onto the newer sec_level plus the role_switch/force_reliable flags.
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Seed with current values so a short copy from userspace
		 * leaves unsupplied trailing fields unchanged. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* NOTE(review): opts.mode/opts.fcs are stored without
		 * validation against supported values — confirm callers
		 * tolerate arbitrary modes. flush_to is read but never
		 * written back here. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->mode = opts.mode;
		l2cap_pi(sk)->fcs  = opts.fcs;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode bit wins: SECURE implies
		 * ENCRYPT implies AUTH. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1761
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy
 * handler; SOL_BLUETOOTH handles BT_SECURITY (connection-oriented and
 * raw sockets only) and BT_DEFER_SETUP (only before the socket is
 * connected).  Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		/* Security level only applies to connection-oriented
		 * (SEQPACKET) and raw sockets. */
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default used when userspace passes a short buffer */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the channel exists */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1825
1826 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1827 {
1828 struct sock *sk = sock->sk;
1829 struct l2cap_options opts;
1830 struct l2cap_conninfo cinfo;
1831 int len, err = 0;
1832 u32 opt;
1833
1834 BT_DBG("sk %p", sk);
1835
1836 if (get_user(len, optlen))
1837 return -EFAULT;
1838
1839 lock_sock(sk);
1840
1841 switch (optname) {
1842 case L2CAP_OPTIONS:
1843 opts.imtu = l2cap_pi(sk)->imtu;
1844 opts.omtu = l2cap_pi(sk)->omtu;
1845 opts.flush_to = l2cap_pi(sk)->flush_to;
1846 opts.mode = l2cap_pi(sk)->mode;
1847 opts.fcs = l2cap_pi(sk)->fcs;
1848
1849 len = min_t(unsigned int, len, sizeof(opts));
1850 if (copy_to_user(optval, (char *) &opts, len))
1851 err = -EFAULT;
1852
1853 break;
1854
1855 case L2CAP_LM:
1856 switch (l2cap_pi(sk)->sec_level) {
1857 case BT_SECURITY_LOW:
1858 opt = L2CAP_LM_AUTH;
1859 break;
1860 case BT_SECURITY_MEDIUM:
1861 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1862 break;
1863 case BT_SECURITY_HIGH:
1864 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1865 L2CAP_LM_SECURE;
1866 break;
1867 default:
1868 opt = 0;
1869 break;
1870 }
1871
1872 if (l2cap_pi(sk)->role_switch)
1873 opt |= L2CAP_LM_MASTER;
1874
1875 if (l2cap_pi(sk)->force_reliable)
1876 opt |= L2CAP_LM_RELIABLE;
1877
1878 if (put_user(opt, (u32 __user *) optval))
1879 err = -EFAULT;
1880 break;
1881
1882 case L2CAP_CONNINFO:
1883 if (sk->sk_state != BT_CONNECTED &&
1884 !(sk->sk_state == BT_CONNECT2 &&
1885 bt_sk(sk)->defer_setup)) {
1886 err = -ENOTCONN;
1887 break;
1888 }
1889
1890 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1891 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1892
1893 len = min_t(unsigned int, len, sizeof(cinfo));
1894 if (copy_to_user(optval, (char *) &cinfo, len))
1895 err = -EFAULT;
1896
1897 break;
1898
1899 default:
1900 err = -ENOPROTOOPT;
1901 break;
1902 }
1903
1904 release_sock(sk);
1905 return err;
1906 }
1907
1908 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1909 {
1910 struct sock *sk = sock->sk;
1911 struct bt_security sec;
1912 int len, err = 0;
1913
1914 BT_DBG("sk %p", sk);
1915
1916 if (level == SOL_L2CAP)
1917 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1918
1919 if (level != SOL_BLUETOOTH)
1920 return -ENOPROTOOPT;
1921
1922 if (get_user(len, optlen))
1923 return -EFAULT;
1924
1925 lock_sock(sk);
1926
1927 switch (optname) {
1928 case BT_SECURITY:
1929 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1930 err = -EINVAL;
1931 break;
1932 }
1933
1934 sec.level = l2cap_pi(sk)->sec_level;
1935
1936 len = min_t(unsigned int, len, sizeof(sec));
1937 if (copy_to_user(optval, (char *) &sec, len))
1938 err = -EFAULT;
1939
1940 break;
1941
1942 case BT_DEFER_SETUP:
1943 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1944 err = -EINVAL;
1945 break;
1946 }
1947
1948 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
1949 err = -EFAULT;
1950
1951 break;
1952
1953 default:
1954 err = -ENOPROTOOPT;
1955 break;
1956 }
1957
1958 release_sock(sk);
1959 return err;
1960 }
1961
1962 static int l2cap_sock_shutdown(struct socket *sock, int how)
1963 {
1964 struct sock *sk = sock->sk;
1965 int err = 0;
1966
1967 BT_DBG("sock %p, sk %p", sock, sk);
1968
1969 if (!sk)
1970 return 0;
1971
1972 lock_sock(sk);
1973 if (!sk->sk_shutdown) {
1974 sk->sk_shutdown = SHUTDOWN_MASK;
1975 l2cap_sock_clear_timer(sk);
1976 __l2cap_sock_close(sk, 0);
1977
1978 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1979 err = bt_sock_wait_state(sk, BT_CLOSED,
1980 sk->sk_lingertime);
1981 }
1982 release_sock(sk);
1983 return err;
1984 }
1985
1986 static int l2cap_sock_release(struct socket *sock)
1987 {
1988 struct sock *sk = sock->sk;
1989 int err;
1990
1991 BT_DBG("sock %p, sk %p", sock, sk);
1992
1993 if (!sk)
1994 return 0;
1995
1996 err = l2cap_sock_shutdown(sock, 2);
1997
1998 sock_orphan(sk);
1999 l2cap_sock_kill(sk);
2000 return err;
2001 }
2002
2003 static void l2cap_chan_ready(struct sock *sk)
2004 {
2005 struct sock *parent = bt_sk(sk)->parent;
2006
2007 BT_DBG("sk %p, parent %p", sk, parent);
2008
2009 l2cap_pi(sk)->conf_state = 0;
2010 l2cap_sock_clear_timer(sk);
2011
2012 if (!parent) {
2013 /* Outgoing channel.
2014 * Wake up socket sleeping on connect.
2015 */
2016 sk->sk_state = BT_CONNECTED;
2017 sk->sk_state_change(sk);
2018 } else {
2019 /* Incoming channel.
2020 * Wake up socket sleeping on accept.
2021 */
2022 parent->sk_data_ready(parent, 0);
2023 }
2024 }
2025
2026 /* Copy frame to all raw sockets on that connection */
2027 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2028 {
2029 struct l2cap_chan_list *l = &conn->chan_list;
2030 struct sk_buff *nskb;
2031 struct sock *sk;
2032
2033 BT_DBG("conn %p", conn);
2034
2035 read_lock(&l->lock);
2036 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2037 if (sk->sk_type != SOCK_RAW)
2038 continue;
2039
2040 /* Don't send frame to the socket it came from */
2041 if (skb->sk == sk)
2042 continue;
2043 nskb = skb_clone(skb, GFP_ATOMIC);
2044 if (!nskb)
2045 continue;
2046
2047 if (sock_queue_rcv_skb(sk, nskb))
2048 kfree_skb(nskb);
2049 }
2050 read_unlock(&l->lock);
2051 }
2052
2053 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU: L2CAP header (CID 0x0001),
 * command header (code/ident/dlen) and @dlen bytes of @data.  A PDU
 * larger than conn->mtu is split into a head skb plus header-less
 * continuation fragments on the frag_list.  Returns the skb or NULL
 * on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
						u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	/* Total wire length; first skb is capped at the connection MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes not yet placed in the head skb */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far */
	kfree_skb(skb);
	return NULL;
}
2112
/* Decode one configuration option at *@ptr and advance *@ptr past it.
 * Values of 1/2/4 bytes are returned by value (little-endian decoded);
 * any other length is returned as a pointer to the raw option data.
 * Returns the total size consumed (header plus value).
 *
 * NOTE(review): opt->len comes straight from the remote peer and is
 * not validated against the remaining buffer length here — confirm
 * every caller bounds its loop before trusting the advanced pointer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2145
2146 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2147 {
2148 struct l2cap_conf_opt *opt = *ptr;
2149
2150 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2151
2152 opt->type = type;
2153 opt->len = len;
2154
2155 switch (len) {
2156 case 1:
2157 *((u8 *) opt->val) = val;
2158 break;
2159
2160 case 2:
2161 *((__le16 *) opt->val) = cpu_to_le16(val);
2162 break;
2163
2164 case 4:
2165 *((__le32 *) opt->val) = cpu_to_le32(val);
2166 break;
2167
2168 default:
2169 memcpy(opt->val, (void *) val, len);
2170 break;
2171 }
2172
2173 *ptr += L2CAP_CONF_OPT_SIZE + len;
2174 }
2175
2176 static inline void l2cap_ertm_init(struct sock *sk)
2177 {
2178 l2cap_pi(sk)->expected_ack_seq = 0;
2179 l2cap_pi(sk)->unacked_frames = 0;
2180 l2cap_pi(sk)->buffer_seq = 0;
2181 l2cap_pi(sk)->num_to_ack = 0;
2182
2183 setup_timer(&l2cap_pi(sk)->retrans_timer,
2184 l2cap_retrans_timeout, (unsigned long) sk);
2185 setup_timer(&l2cap_pi(sk)->monitor_timer,
2186 l2cap_monitor_timeout, (unsigned long) sk);
2187
2188 __skb_queue_head_init(SREJ_QUEUE(sk));
2189 }
2190
2191 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2192 {
2193 u32 local_feat_mask = l2cap_feat_mask;
2194 if (enable_ertm)
2195 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2196
2197 switch (mode) {
2198 case L2CAP_MODE_ERTM:
2199 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2200 case L2CAP_MODE_STREAMING:
2201 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2202 default:
2203 return 0x00;
2204 }
2205 }
2206
2207 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2208 {
2209 switch (mode) {
2210 case L2CAP_MODE_STREAMING:
2211 case L2CAP_MODE_ERTM:
2212 if (l2cap_mode_supported(mode, remote_feat_mask))
2213 return mode;
2214 /* fall through */
2215 default:
2216 return L2CAP_MODE_BASIC;
2217 }
2218 }
2219
/* Build our outgoing configuration request into @data.
 * On the first exchange the channel mode is (re)selected against the
 * connection feature mask; the chosen mode then determines which
 * options (MTU, RFC, FCS) are appended.  Returns the number of bytes
 * written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was set explicitly by the user: insist on it */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		/* Pick the best mutually supported mode */
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an MTU option when it differs from default */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit    = L2CAP_DEFAULT_MAX_TX;
		/* Timeouts in a request are informational; the acceptor
		 * sets the authoritative values in its response */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS when allowed and either side
		 * prefers running without it */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		/* Window/retransmission fields are unused in Streaming */
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2303
/* Parse the peer's buffered configuration request (pi->conf_req /
 * pi->conf_len) and build our response into @data.  Unknown
 * non-hint options are rejected with L2CAP_CONF_UNKNOWN; a mode
 * mismatch produces L2CAP_CONF_UNACCEPT with our counter-proposal.
 * Returns the response length, or -ECONNREFUSED when negotiation
 * cannot succeed.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	/* Defaults used when the request omits the RFC / MTU options */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* First pass: decode every option the peer sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored; others must be
		 * reported back if unknown */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			/* Remember that the peer can live without FCS */
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;

			break;

		default:
			if (hint)
				break;

			/* Echo the unknown option type in the response */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode selection happens only on the very first exchange */
	if (pi->num_conf_rsp || pi->num_conf_req)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was set explicitly by the user: insist on it */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->mode != rfc.mode) {
		/* Counter-propose our mode; give up after one retry */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			/* Adopt the peer's transmit parameters, but the
			 * timeouts are ours to dictate as acceptor */
			pi->remote_tx_win  = rfc.txwin_size;
			pi->remote_max_tx  = rfc.max_transmit;
			pi->max_pdu_size   = rfc.max_pdu_size;

			rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
			rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_tx_win = rfc.txwin_size;
			pi->max_pdu_size  = rfc.max_pdu_size;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid   = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2446
2447 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2448 {
2449 struct l2cap_pinfo *pi = l2cap_pi(sk);
2450 struct l2cap_conf_req *req = data;
2451 void *ptr = req->data;
2452 int type, olen;
2453 unsigned long val;
2454 struct l2cap_conf_rfc rfc;
2455
2456 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2457
2458 while (len >= L2CAP_CONF_OPT_SIZE) {
2459 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2460
2461 switch (type) {
2462 case L2CAP_CONF_MTU:
2463 if (val < L2CAP_DEFAULT_MIN_MTU) {
2464 *result = L2CAP_CONF_UNACCEPT;
2465 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2466 } else
2467 pi->omtu = val;
2468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2469 break;
2470
2471 case L2CAP_CONF_FLUSH_TO:
2472 pi->flush_to = val;
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2474 2, pi->flush_to);
2475 break;
2476
2477 case L2CAP_CONF_RFC:
2478 if (olen == sizeof(rfc))
2479 memcpy(&rfc, (void *)val, olen);
2480
2481 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2482 rfc.mode != pi->mode)
2483 return -ECONNREFUSED;
2484
2485 pi->mode = rfc.mode;
2486 pi->fcs = 0;
2487
2488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2489 sizeof(rfc), (unsigned long) &rfc);
2490 break;
2491 }
2492 }
2493
2494 if (*result == L2CAP_CONF_SUCCESS) {
2495 switch (rfc.mode) {
2496 case L2CAP_MODE_ERTM:
2497 pi->remote_tx_win = rfc.txwin_size;
2498 pi->retrans_timeout = rfc.retrans_timeout;
2499 pi->monitor_timeout = rfc.monitor_timeout;
2500 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2501 break;
2502 case L2CAP_MODE_STREAMING:
2503 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2504 break;
2505 }
2506 }
2507
2508 req->dcid = cpu_to_le16(pi->dcid);
2509 req->flags = cpu_to_le16(0x0000);
2510
2511 return ptr - data;
2512 }
2513
2514 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2515 {
2516 struct l2cap_conf_rsp *rsp = data;
2517 void *ptr = rsp->data;
2518
2519 BT_DBG("sk %p", sk);
2520
2521 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2522 rsp->result = cpu_to_le16(result);
2523 rsp->flags = cpu_to_le16(flags);
2524
2525 return ptr - data;
2526 }
2527
2528 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2529 {
2530 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2531
2532 if (rej->reason != 0x0000)
2533 return 0;
2534
2535 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2536 cmd->ident == conn->info_ident) {
2537 del_timer(&conn->info_timer);
2538
2539 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2540 conn->info_ident = 0;
2541
2542 l2cap_conn_start(conn);
2543 }
2544
2545 return 0;
2546 }
2547
/* Handle an incoming L2CAP Connection Request.
 *
 * Looks up a listening socket for the requested PSM, allocates and
 * initializes a child socket for the new channel, and answers with a
 * Connection Response.  Depending on security requirements and whether
 * the information (feature mask) exchange has completed, the response
 * is a success or a "pending" with an appropriate status code.
 *
 * NOTE(review): l2cap_get_sock_by_psm() appears to return the parent
 * socket bh-locked — it is unlocked at the "response" label; confirm
 * against its definition.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05: presumably the HCI "authentication failure"
		 * disconnect reason, reported via l2cap_disconn_ind() —
		 * TODO confirm. */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child socket from the listener and record the
	 * peer's source CID as our destination CID. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Remember the request ident so a deferred accept can answer it. */
	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must explicitly authorize the
				 * connection before we accept it. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress. */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange still outstanding: answer "pending" and
		 * finish once the information response arrives. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* First connection on this link: kick off the feature-mask
		 * information exchange with a guard timer. */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2664
/* Handle an L2CAP Connection Response for a connection we initiated.
 *
 * On success the channel enters BT_CONFIG and the first Configure
 * Request is sent; a "pending" result only flags the channel; any other
 * result tears the channel down with ECONNREFUSED.
 *
 * NOTE(review): both lookup helpers appear to return the socket
 * bh-locked (bh_unlock_sock() at the end); confirm against their
 * definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A response normally echoes our source CID; when it is zero
	 * (e.g. a rejection), fall back to matching by command ident. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Channel accepted: move on to configuration. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Refused (bad PSM, security block, no resources, ...). */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2715
/* Handle an incoming Configure Request.
 *
 * Options may be split across several requests (continuation flag, bit
 * 0 of "flags"); partial option sets are accumulated in conf_req until
 * the final fragment arrives, then parsed and answered with a single
 * Configure Response.  When both directions are configured the channel
 * goes to BT_CONNECTED.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* The peer's dcid is our scid. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unresolvable option conflict: drop the channel. */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: enable CRC16 FCS unless the
		 * peer explicitly asked for none, reset the sequence state
		 * and declare the channel connected. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not yet sent our own Configure Request; do so
		 * now that the peer's request has been answered. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2801
2802 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2803 {
2804 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2805 u16 scid, flags, result;
2806 struct sock *sk;
2807
2808 scid = __le16_to_cpu(rsp->scid);
2809 flags = __le16_to_cpu(rsp->flags);
2810 result = __le16_to_cpu(rsp->result);
2811
2812 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2813 scid, flags, result);
2814
2815 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2816 if (!sk)
2817 return 0;
2818
2819 switch (result) {
2820 case L2CAP_CONF_SUCCESS:
2821 break;
2822
2823 case L2CAP_CONF_UNACCEPT:
2824 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2825 int len = cmd->len - sizeof(*rsp);
2826 char req[64];
2827
2828 /* throw out any old stored conf requests */
2829 result = L2CAP_CONF_SUCCESS;
2830 len = l2cap_parse_conf_rsp(sk, rsp->data,
2831 len, req, &result);
2832 if (len < 0) {
2833 l2cap_send_disconn_req(conn, sk);
2834 goto done;
2835 }
2836
2837 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2838 L2CAP_CONF_REQ, len, req);
2839 l2cap_pi(sk)->num_conf_req++;
2840 if (result != L2CAP_CONF_SUCCESS)
2841 goto done;
2842 break;
2843 }
2844
2845 default:
2846 sk->sk_state = BT_DISCONN;
2847 sk->sk_err = ECONNRESET;
2848 l2cap_sock_set_timer(sk, HZ * 5);
2849 l2cap_send_disconn_req(conn, sk);
2850 goto done;
2851 }
2852
2853 if (flags & 0x01)
2854 goto done;
2855
2856 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2857
2858 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2859 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2860 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2861 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2862
2863 sk->sk_state = BT_CONNECTED;
2864 l2cap_pi(sk)->next_tx_seq = 0;
2865 l2cap_pi(sk)->expected_tx_seq = 0;
2866 __skb_queue_head_init(TX_QUEUE(sk));
2867 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2868 l2cap_ertm_init(sk);
2869
2870 l2cap_chan_ready(sk);
2871 }
2872
2873 done:
2874 bh_unlock_sock(sk);
2875 return 0;
2876 }
2877
/* Handle an incoming Disconnection Request: acknowledge it, stop all
 * ERTM machinery, destroy the channel and kill the socket.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo the CID pair back in the response. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop retransmission state before tearing down. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2914
/* Handle a Disconnection Response to a request we sent: flush pending
 * transmit state, destroy the channel and kill the socket.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Stop retransmission state before tearing down. */
		skb_queue_purge(SREJ_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
	}

	/* Orderly local close: no error reported to the socket. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2944
2945 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2946 {
2947 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2948 u16 type;
2949
2950 type = __le16_to_cpu(req->type);
2951
2952 BT_DBG("type 0x%4.4x", type);
2953
2954 if (type == L2CAP_IT_FEAT_MASK) {
2955 u8 buf[8];
2956 u32 feat_mask = l2cap_feat_mask;
2957 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2958 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2959 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2960 if (enable_ertm)
2961 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2962 | L2CAP_FEAT_FCS;
2963 put_unaligned_le32(feat_mask, rsp->data);
2964 l2cap_send_cmd(conn, cmd->ident,
2965 L2CAP_INFO_RSP, sizeof(buf), buf);
2966 } else if (type == L2CAP_IT_FIXED_CHAN) {
2967 u8 buf[12];
2968 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2969 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2970 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2971 memcpy(buf + 4, l2cap_fixed_chan, 8);
2972 l2cap_send_cmd(conn, cmd->ident,
2973 L2CAP_INFO_RSP, sizeof(buf), buf);
2974 } else {
2975 struct l2cap_info_rsp rsp;
2976 rsp.type = cpu_to_le16(type);
2977 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2978 l2cap_send_cmd(conn, cmd->ident,
2979 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2980 }
2981
2982 return 0;
2983 }
2984
2985 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2986 {
2987 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2988 u16 type, result;
2989
2990 type = __le16_to_cpu(rsp->type);
2991 result = __le16_to_cpu(rsp->result);
2992
2993 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2994
2995 del_timer(&conn->info_timer);
2996
2997 if (type == L2CAP_IT_FEAT_MASK) {
2998 conn->feat_mask = get_unaligned_le32(rsp->data);
2999
3000 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3001 struct l2cap_info_req req;
3002 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3003
3004 conn->info_ident = l2cap_get_ident(conn);
3005
3006 l2cap_send_cmd(conn, conn->info_ident,
3007 L2CAP_INFO_REQ, sizeof(req), &req);
3008 } else {
3009 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3010 conn->info_ident = 0;
3011
3012 l2cap_conn_start(conn);
3013 }
3014 } else if (type == L2CAP_IT_FIXED_CHAN) {
3015 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3016 conn->info_ident = 0;
3017
3018 l2cap_conn_start(conn);
3019 }
3020
3021 return 0;
3022 }
3023
/* Parse and dispatch every signalling command contained in one C-frame
 * on the signalling channel.  Several commands may be concatenated in a
 * single frame; an unknown or failing command is answered with a
 * Command Reject.  Always consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command must fit in the remaining payload and carry a
		 * non-zero identifier. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3113
3114 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3115 {
3116 u16 our_fcs, rcv_fcs;
3117 int hdr_size = L2CAP_HDR_SIZE + 2;
3118
3119 if (pi->fcs == L2CAP_FCS_CRC16) {
3120 skb_trim(skb, skb->len - 2);
3121 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3122 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3123
3124 if (our_fcs != rcv_fcs)
3125 return -EINVAL;
3126 }
3127 return 0;
3128 }
3129
3130 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3131 {
3132 struct sk_buff *next_skb;
3133
3134 bt_cb(skb)->tx_seq = tx_seq;
3135 bt_cb(skb)->sar = sar;
3136
3137 next_skb = skb_peek(SREJ_QUEUE(sk));
3138 if (!next_skb) {
3139 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3140 return;
3141 }
3142
3143 do {
3144 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3145 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3146 return;
3147 }
3148
3149 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3150 break;
3151
3152 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3153
3154 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3155 }
3156
/* Reassemble a received I-frame into an SDU according to the SAR bits
 * of "control" and deliver complete SDUs to the socket receive queue.
 * Always consumes skb; returns 0 on success or a negative error.
 *
 * NOTE(review): several paths here look fragile — duplicate START /
 * misplaced fragment cases free pi->sdu without clearing the
 * L2CAP_CONN_SAR_SDU flag, and the END case returns 0 even when the
 * reassembled length does not match sdu_len; verify against later
 * upstream fixes before relying on the error semantics.
 */
static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU while reassembly is in progress is a
		 * protocol violation: drop the partial SDU. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		/* A new START while reassembling also violates SAR rules. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		/* More data than announced in the START frame: discard. */
		if (pi->partial_sdu_len > pi->sdu_len)
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		/* Deliver only if the reassembled length matches exactly. */
		if (pi->partial_sdu_len == pi->sdu_len) {
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
			if (err < 0)
				kfree_skb(_skb);
		}
		kfree_skb(pi->sdu);
		err = 0;

		break;
	}

	kfree_skb(skb);
	return err;
}
3236
3237 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3238 {
3239 struct sk_buff *skb;
3240 u16 control = 0;
3241
3242 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3243 if (bt_cb(skb)->tx_seq != tx_seq)
3244 break;
3245
3246 skb = skb_dequeue(SREJ_QUEUE(sk));
3247 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3248 l2cap_sar_reassembly_sdu(sk, skb, control);
3249 l2cap_pi(sk)->buffer_seq_srej =
3250 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3251 tx_seq++;
3252 }
3253 }
3254
3255 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3256 {
3257 struct l2cap_pinfo *pi = l2cap_pi(sk);
3258 struct srej_list *l, *tmp;
3259 u16 control;
3260
3261 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3262 if (l->tx_seq == tx_seq) {
3263 list_del(&l->list);
3264 kfree(l);
3265 return;
3266 }
3267 control = L2CAP_SUPER_SELECT_REJECT;
3268 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3269 l2cap_send_sframe(pi, control);
3270 list_del(&l->list);
3271 list_add_tail(&l->list, SREJ_LIST(sk));
3272 }
3273 }
3274
3275 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3276 {
3277 struct l2cap_pinfo *pi = l2cap_pi(sk);
3278 struct srej_list *new;
3279 u16 control;
3280
3281 while (tx_seq != pi->expected_tx_seq) {
3282 control = L2CAP_SUPER_SELECT_REJECT;
3283 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3284 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3285 control |= L2CAP_CTRL_POLL;
3286 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3287 }
3288 l2cap_send_sframe(pi, control);
3289
3290 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3291 new->tx_seq = pi->expected_tx_seq++;
3292 list_add_tail(&new->list, SREJ_LIST(sk));
3293 }
3294 pi->expected_tx_seq++;
3295 }
3296
/* ERTM receive path for an I-frame.
 *
 * Acks outstanding transmitted frames via the piggy-backed req_seq,
 * then either delivers the frame (expected sequence number) or starts /
 * continues selective-reject (SREJ) recovery for an out-of-sequence
 * frame.  Sends a receiver-ready ack every few delivered frames.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u16 tx_control = 0;
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int err = 0;

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* The req_seq field acknowledges our transmitted frames. */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* The oldest missing frame arrived: buffer it and
			 * flush any now-contiguous frames. */
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				/* Recovery complete. */
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
			}
		} else {
			struct srej_list *l;
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

			/* Already requested?  Then re-send the SREJs. */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		/* First out-of-sequence frame: enter SREJ recovery. */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* Buffer in-order too while a gap is outstanding so the
		 * SDU stream can be replayed in one pass. */
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
		return 0;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit answering our REJ: retransmit from the acked
		 * point unless that REJ was already acted upon. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else {
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);
		}
	}

	pi->buffer_seq = (pi->buffer_seq + 1) % 64;

	err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
	if (err < 0)
		return err;

	/* Acknowledge every L2CAP_DEFAULT_NUM_TO_ACK frames with an RR. */
	pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
	if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
		tx_control |= L2CAP_SUPER_RCV_READY;
		tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, tx_control);
	}
	return 0;
}
3389
/* ERTM receive path for a supervisory (S-) frame: RR, REJ, SREJ, RNR.
 *
 * NOTE(review): the local "tx_seq" actually holds the frame's req_seq
 * field (see __get_reqseq below) — i.e. the peer's acknowledgement of
 * our transmissions; the name is misleading but kept as-is here.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		if (rx_control & L2CAP_CTRL_POLL) {
			/* Peer polls us: answer with an RR carrying the
			 * final bit and our current buffer sequence. */
			u16 control = L2CAP_CTRL_FINAL;
			control |= L2CAP_SUPER_RCV_READY |
				(pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
			l2cap_send_sframe(l2cap_pi(sk), control);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Answer to our poll: ack frames, then retransmit
			 * from the acked point unless a REJ already
			 * triggered that retransmission. */
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}

			if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
				break;

			/* The awaited F-bit arrived: leave the WAIT_F
			 * state and stop the monitor timer. */
			pi->conn_state &= ~L2CAP_CONN_WAIT_F;
			del_timer(&pi->monitor_timer);

			if (pi->unacked_frames > 0)
				__mod_retrans_timer();
		} else {
			/* Plain RR: acknowledge and keep sending. */
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);

			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			l2cap_ertm_send(sk);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		}
		break;

	case L2CAP_SUPER_REJECT:
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		pi->expected_ack_seq = __get_reqseq(rx_control);
		l2cap_drop_acked_frames(sk);

		if (rx_control & L2CAP_CTRL_FINAL) {
			/* REJ with F-bit: retransmit only if this REJ was
			 * not already acted upon (RejActioned). */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				sk->sk_send_head = TX_QUEUE(sk)->next;
				pi->next_tx_seq = pi->expected_ack_seq;
				l2cap_ertm_send(sk);
			}
		} else {
			/* Go-back-N retransmission from the rejected frame. */
			sk->sk_send_head = TX_QUEUE(sk)->next;
			pi->next_tx_seq = pi->expected_ack_seq;
			l2cap_ertm_send(sk);

			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_REJ_ACT;
			}
		}

		break;

	case L2CAP_SUPER_SELECT_REJECT:
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (rx_control & L2CAP_CTRL_POLL) {
			l2cap_retransmit_frame(sk, tx_seq);
			pi->expected_ack_seq = tx_seq;
			l2cap_drop_acked_frames(sk);
			l2cap_ertm_send(sk);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		} else if (rx_control & L2CAP_CTRL_FINAL) {
			/* Retransmit unless this exact SREJ was already
			 * acted upon (SrejActioned). */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
					pi->srej_save_reqseq == tx_seq)
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			else
				l2cap_retransmit_frame(sk, tx_seq);
		}
		else {
			l2cap_retransmit_frame(sk, tx_seq);
			if (pi->conn_state & L2CAP_CONN_WAIT_F) {
				pi->srej_save_reqseq = tx_seq;
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			}
		}
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		/* Peer is busy: stop retransmitting until it recovers. */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		del_timer(&l2cap_pi(sk)->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL) {
			u16 control = L2CAP_CTRL_FINAL;
			l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
		}
		break;
	}

	return 0;
}
3510
/* Deliver a frame received on a connection-oriented channel, dispatching
 * on the channel's mode (basic, ERTM, streaming).  Consumes the skb on
 * all paths.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return the socket
 * bh-locked — unlocked at "done"; confirm against its definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control, len;
	u8 tx_seq;
	int err;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		/* A SAR start fragment carries a 2-byte SDU length. */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask retransmission.
		 */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_iframe(control))
			err = l2cap_data_channel_iframe(sk, control, skb);
		else
			err = l2cap_data_channel_sframe(sk, control, skb);

		if (!err)
			goto done;
		break;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* S-frames are not valid in streaming mode. */
		if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
			goto drop;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		tx_seq = __get_txseq(control);

		/* Streaming mode never retransmits: just resynchronize
		 * the expected sequence number on a gap. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = tx_seq + 1;

		err = l2cap_sar_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3619
3620 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3621 {
3622 struct sock *sk;
3623
3624 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3625 if (!sk)
3626 goto drop;
3627
3628 BT_DBG("sk %p, len %d", sk, skb->len);
3629
3630 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3631 goto drop;
3632
3633 if (l2cap_pi(sk)->imtu < skb->len)
3634 goto drop;
3635
3636 if (!sock_queue_rcv_skb(sk, skb))
3637 goto done;
3638
3639 drop:
3640 kfree_skb(skb);
3641
3642 done:
3643 if (sk)
3644 bh_unlock_sock(sk);
3645 return 0;
3646 }
3647
3648 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3649 {
3650 struct l2cap_hdr *lh = (void *) skb->data;
3651 u16 cid, len;
3652 __le16 psm;
3653
3654 skb_pull(skb, L2CAP_HDR_SIZE);
3655 cid = __le16_to_cpu(lh->cid);
3656 len = __le16_to_cpu(lh->len);
3657
3658 if (len != skb->len) {
3659 kfree_skb(skb);
3660 return;
3661 }
3662
3663 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3664
3665 switch (cid) {
3666 case L2CAP_CID_SIGNALING:
3667 l2cap_sig_channel(conn, skb);
3668 break;
3669
3670 case L2CAP_CID_CONN_LESS:
3671 psm = get_unaligned_le16(skb->data);
3672 skb_pull(skb, 2);
3673 l2cap_conless_channel(conn, psm, skb);
3674 break;
3675
3676 default:
3677 l2cap_data_channel(conn, cid, skb);
3678 break;
3679 }
3680 }
3681
3682 /* ---- L2CAP interface with lower layer (HCI) ---- */
3683
3684 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3685 {
3686 int exact = 0, lm1 = 0, lm2 = 0;
3687 register struct sock *sk;
3688 struct hlist_node *node;
3689
3690 if (type != ACL_LINK)
3691 return 0;
3692
3693 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3694
3695 /* Find listening sockets and check their link_mode */
3696 read_lock(&l2cap_sk_list.lock);
3697 sk_for_each(sk, node, &l2cap_sk_list.head) {
3698 if (sk->sk_state != BT_LISTEN)
3699 continue;
3700
3701 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3702 lm1 |= HCI_LM_ACCEPT;
3703 if (l2cap_pi(sk)->role_switch)
3704 lm1 |= HCI_LM_MASTER;
3705 exact++;
3706 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3707 lm2 |= HCI_LM_ACCEPT;
3708 if (l2cap_pi(sk)->role_switch)
3709 lm2 |= HCI_LM_MASTER;
3710 }
3711 }
3712 read_unlock(&l2cap_sk_list.lock);
3713
3714 return exact ? lm1 : lm2;
3715 }
3716
3717 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3718 {
3719 struct l2cap_conn *conn;
3720
3721 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3722
3723 if (hcon->type != ACL_LINK)
3724 return 0;
3725
3726 if (!status) {
3727 conn = l2cap_conn_add(hcon, status);
3728 if (conn)
3729 l2cap_conn_ready(conn);
3730 } else
3731 l2cap_conn_del(hcon, bt_err(status));
3732
3733 return 0;
3734 }
3735
3736 static int l2cap_disconn_ind(struct hci_conn *hcon)
3737 {
3738 struct l2cap_conn *conn = hcon->l2cap_data;
3739
3740 BT_DBG("hcon %p", hcon);
3741
3742 if (hcon->type != ACL_LINK || !conn)
3743 return 0x13;
3744
3745 return conn->disc_reason;
3746 }
3747
3748 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3749 {
3750 BT_DBG("hcon %p reason %d", hcon, reason);
3751
3752 if (hcon->type != ACL_LINK)
3753 return 0;
3754
3755 l2cap_conn_del(hcon, bt_err(reason));
3756
3757 return 0;
3758 }
3759
3760 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3761 {
3762 if (sk->sk_type != SOCK_SEQPACKET)
3763 return;
3764
3765 if (encrypt == 0x00) {
3766 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3767 l2cap_sock_clear_timer(sk);
3768 l2cap_sock_set_timer(sk, HZ * 5);
3769 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3770 __l2cap_sock_close(sk, ECONNREFUSED);
3771 } else {
3772 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3773 l2cap_sock_clear_timer(sk);
3774 }
3775 }
3776
/* HCI callback: an authentication/encryption request on the link has
 * completed with the given status and encryption state.
 *
 * Walks every channel on the connection (under the channel-list read
 * lock, each socket bh-locked in turn) and advances its state machine:
 * established channels only re-check encryption; channels blocked in
 * BT_CONNECT send their deferred Connection Request (or arm a short
 * failure timer); channels in BT_CONNECT2 answer the peer's pending
 * Connection Request with success or security-block.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting for the ACL connection itself are
		 * not interested in this security event. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Already-established channels: only react to the
		 * encryption change, nothing else to do here. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the Connection Request
				 * that was deferred until now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: let the (re-armed, short)
				 * socket timer tear the channel down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel held pending security: answer the
			 * peer's Connection Request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
3849
/* HCI callback: one ACL fragment has arrived for this link.
 *
 * Reassembles L2CAP frames from ACL fragments: a complete frame in a
 * single ACL_START packet is dispatched directly; otherwise fragments
 * are copied into conn->rx_skb until conn->rx_len reaches zero.  Any
 * framing violation (unexpected start/continuation, short or oversized
 * frame) drops the partial frame and marks the connection unreliable.
 *
 * Always returns 0; the incoming skb is consumed on every path (the
 * drop label is also the normal exit — fragments were copied, not
 * retained, so the original skb is always freed here, except for a
 * complete ACL_START frame which is handed off and returns early).
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First data on a link may arrive before a connection object
	 * exists — create one on demand. */
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start fragment while reassembly is in progress means
		 * the previous frame was never completed. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 16-bit length field of the header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the announced frame length. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
3937
3938 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3939 {
3940 struct sock *sk;
3941 struct hlist_node *node;
3942 char *str = buf;
3943
3944 read_lock_bh(&l2cap_sk_list.lock);
3945
3946 sk_for_each(sk, node, &l2cap_sk_list.head) {
3947 struct l2cap_pinfo *pi = l2cap_pi(sk);
3948
3949 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3950 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3951 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3952 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3953 }
3954
3955 read_unlock_bh(&l2cap_sk_list.lock);
3956
3957 return str - buf;
3958 }
3959
/* Read-only class attribute backed by l2cap_sysfs_show; registered on
 * bt_class during module init. */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3961
/* Socket operations exposed to userspace for PF_BLUETOOTH/BTPROTO_L2CAP
 * sockets; unsupported operations use the sock_no_* stubs. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
3981
/* Protocol family hook: creates L2CAP sockets for PF_BLUETOOTH. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
3987
/* Registration with the HCI core: callbacks the lower layer invokes for
 * connection, disconnection, security and inbound ACL data events. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
3998
3999 static int __init l2cap_init(void)
4000 {
4001 int err;
4002
4003 err = proto_register(&l2cap_proto, 0);
4004 if (err < 0)
4005 return err;
4006
4007 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4008 if (err < 0) {
4009 BT_ERR("L2CAP socket registration failed");
4010 goto error;
4011 }
4012
4013 err = hci_register_proto(&l2cap_hci_proto);
4014 if (err < 0) {
4015 BT_ERR("L2CAP protocol registration failed");
4016 bt_sock_unregister(BTPROTO_L2CAP);
4017 goto error;
4018 }
4019
4020 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
4021 BT_ERR("Failed to create L2CAP info file");
4022
4023 BT_INFO("L2CAP ver %s", VERSION);
4024 BT_INFO("L2CAP socket layer initialized");
4025
4026 return 0;
4027
4028 error:
4029 proto_unregister(&l2cap_proto);
4030 return err;
4031 }
4032
4033 static void __exit l2cap_exit(void)
4034 {
4035 class_remove_file(bt_class, &class_attr_l2cap);
4036
4037 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4038 BT_ERR("L2CAP socket unregistration failed");
4039
4040 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4041 BT_ERR("L2CAP protocol unregistration failed");
4042
4043 proto_unregister(&l2cap_proto);
4044 }
4045
/* Intentionally empty.  Exported so that modules which only use L2CAP
 * sockets (and no other symbol from this module) can reference it and
 * thereby trigger automatic loading of the L2CAP module. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4054
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime-tunable (0644) switch for enhanced retransmission mode. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");