]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/l2cap.c
Bluetooth: Use non-flushable by default L2CAP data packets
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core and sockets. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
#define VERSION "2.15"

/* Module parameter: non-zero disables ERTM and streaming mode support. */
static int disable_ertm;

/* Feature mask advertised in L2CAP information responses. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; 0x02 marks the signalling channel as supported. */
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Workqueue backing l2cap_busy_work (ERTM local-busy handling, presumably
 * — the work function is defined elsewhere in this file). */
static struct workqueue_struct *_busy_wq;

/* Global list of all L2CAP sockets, protected by its embedded rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

static void l2cap_busy_work(struct work_struct *work);

/* Forward declarations for socket teardown helpers used before their
 * definitions. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static int l2cap_build_conf_req(struct sock *sk, void *data);
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);

static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
87 {
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
90 }
91
/* Cancel the socket timer if it is pending. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
97
/* Socket timer expiry handler (timer/BH context).
 * Closes the socket with an error chosen from its current state; if the
 * socket is locked by a process context user, retries shortly instead. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	/* Map the state at expiry to the error reported to userspace. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill must run on the unlocked socket; then drop the timer's ref. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
130
131 /* ---- L2CAP channels ---- */
132 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
133 {
134 struct sock *s;
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->dcid == cid)
137 break;
138 }
139 return s;
140 }
141
142 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 {
144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->scid == cid)
147 break;
148 }
149 return s;
150 }
151
/* Find channel with given SCID.
 * Takes the list lock only for the lookup; the returned socket is
 * bh-locked, and the caller must bh_unlock_sock() it.
 * Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
164
165 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 {
167 struct sock *s;
168 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
169 if (l2cap_pi(s)->ident == ident)
170 break;
171 }
172 return s;
173 }
174
/* Find channel with given command identifier.
 * Like l2cap_get_chan_by_scid(): returns the socket bh-locked, or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
185
186 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
187 {
188 u16 cid = L2CAP_CID_DYN_START;
189
190 for (; cid < L2CAP_CID_DYN_END; cid++) {
191 if (!__l2cap_get_chan_by_scid(l, cid))
192 return cid;
193 }
194
195 return 0;
196 }
197
/* Push @sk onto the head of the connection's channel list.
 * Takes a socket reference; caller holds the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
209
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link().  Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
226
227 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
228 {
229 struct l2cap_chan_list *l = &conn->chan_list;
230
231 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
232 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
233
234 conn->disc_reason = 0x13;
235
236 l2cap_pi(sk)->conn = conn;
237
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
241 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
244 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
245 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
246 } else {
247 /* Raw socket can send/recv signalling messages only */
248 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
249 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
250 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
251 }
252
253 __l2cap_chan_link(l, sk);
254
255 if (parent)
256 bt_accept_enqueue(parent, sk);
257 }
258
/* Delete channel.
 * Must be called on the locked socket.
 * Detaches the channel from its connection, marks the socket closed and
 * zapped, wakes the owner (or listening parent) and releases all queued
 * data and ERTM state. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: remove from the parent's accept queue
		 * and wake the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		/* Stop all ERTM timers and drop buffered frames. */
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		/* Free any outstanding SREJ bookkeeping entries. */
		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
307
308 static inline u8 l2cap_get_auth_type(struct sock *sk)
309 {
310 if (sk->sk_type == SOCK_RAW) {
311 switch (l2cap_pi(sk)->sec_level) {
312 case BT_SECURITY_HIGH:
313 return HCI_AT_DEDICATED_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 return HCI_AT_DEDICATED_BONDING;
316 default:
317 return HCI_AT_NO_BONDING;
318 }
319 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
321 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
322
323 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
324 return HCI_AT_NO_BONDING_MITM;
325 else
326 return HCI_AT_NO_BONDING;
327 } else {
328 switch (l2cap_pi(sk)->sec_level) {
329 case BT_SECURITY_HIGH:
330 return HCI_AT_GENERAL_BONDING_MITM;
331 case BT_SECURITY_MEDIUM:
332 return HCI_AT_GENERAL_BONDING;
333 default:
334 return HCI_AT_NO_BONDING;
335 }
336 }
337 }
338
339 /* Service level security */
340 static inline int l2cap_check_security(struct sock *sk)
341 {
342 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
343 __u8 auth_type;
344
345 auth_type = l2cap_get_auth_type(sk);
346
347 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
348 auth_type);
349 }
350
351 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
352 {
353 u8 id;
354
355 /* Get next available identificator.
356 * 1 - 128 are used by kernel.
357 * 129 - 199 are reserved.
358 * 200 - 254 are used by utilities like l2ping, etc.
359 */
360
361 spin_lock_bh(&conn->lock);
362
363 if (++conn->tx_ident > 128)
364 conn->tx_ident = 1;
365
366 id = conn->tx_ident;
367
368 spin_unlock_bh(&conn->lock);
369
370 return id;
371 }
372
373 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
374 {
375 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
376 u8 flags;
377
378 BT_DBG("code 0x%2.2x", code);
379
380 if (!skb)
381 return;
382
383 if (lmp_no_flush_capable(conn->hcon->hdev))
384 flags = ACL_START_NO_FLUSH;
385 else
386 flags = ACL_START;
387
388 hci_send_acl(conn->hcon, skb, flags);
389 }
390
/* Build and transmit a single ERTM/streaming S-frame carrying @control.
 * Consumes any pending Final/Poll bits from conn_state, appends a CRC16
 * FCS when the channel uses it, and does nothing unless connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit, then clear the request flag. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Same for a pending Poll bit. */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers everything before the FCS field itself. */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
442
/* Send a Receiver-Ready, or Receiver-Not-Ready while the local side is
 * busy, acknowledging frames up to buffer_seq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Remember that we told the peer we are busy. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
455
456 static inline int __l2cap_no_conn_pending(struct sock *sk)
457 {
458 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
459 }
460
/* Start channel establishment on an existing connection: send a connect
 * request once the feature-mask exchange is done and security allows it;
 * otherwise kick off the information request first. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight: wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		/* First channel on this link: query the peer's features. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
494
495 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
496 {
497 u32 local_feat_mask = l2cap_feat_mask;
498 if (!disable_ertm)
499 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
500
501 switch (mode) {
502 case L2CAP_MODE_ERTM:
503 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
504 case L2CAP_MODE_STREAMING:
505 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
506 default:
507 return 0x00;
508 }
509 }
510
/* Initiate channel disconnection: flush pending TX, stop ERTM timers,
 * send a disconnect request and move the socket to BT_DISCONN with @err
 * as the pending socket error. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
534
535 /* ---- L2CAP connections ---- */
536 static void l2cap_conn_start(struct l2cap_conn *conn)
537 {
538 struct l2cap_chan_list *l = &conn->chan_list;
539 struct sock_del_list del, *tmp1, *tmp2;
540 struct sock *sk;
541
542 BT_DBG("conn %p", conn);
543
544 INIT_LIST_HEAD(&del.list);
545
546 read_lock(&l->lock);
547
548 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
549 bh_lock_sock(sk);
550
551 if (sk->sk_type != SOCK_SEQPACKET &&
552 sk->sk_type != SOCK_STREAM) {
553 bh_unlock_sock(sk);
554 continue;
555 }
556
557 if (sk->sk_state == BT_CONNECT) {
558 struct l2cap_conn_req req;
559
560 if (!l2cap_check_security(sk) ||
561 !__l2cap_no_conn_pending(sk)) {
562 bh_unlock_sock(sk);
563 continue;
564 }
565
566 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
567 conn->feat_mask)
568 && l2cap_pi(sk)->conf_state &
569 L2CAP_CONF_STATE2_DEVICE) {
570 tmp1 = kzalloc(sizeof(struct sock_del_list),
571 GFP_ATOMIC);
572 tmp1->sk = sk;
573 list_add_tail(&tmp1->list, &del.list);
574 bh_unlock_sock(sk);
575 continue;
576 }
577
578 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
579 req.psm = l2cap_pi(sk)->psm;
580
581 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
582 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
583
584 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
585 L2CAP_CONN_REQ, sizeof(req), &req);
586
587 } else if (sk->sk_state == BT_CONNECT2) {
588 struct l2cap_conn_rsp rsp;
589 char buf[128];
590 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
591 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
592
593 if (l2cap_check_security(sk)) {
594 if (bt_sk(sk)->defer_setup) {
595 struct sock *parent = bt_sk(sk)->parent;
596 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
597 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
598 parent->sk_data_ready(parent, 0);
599
600 } else {
601 sk->sk_state = BT_CONFIG;
602 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
603 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
604 }
605 } else {
606 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
607 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
608 }
609
610 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
611 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
612
613 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
614 rsp.result != L2CAP_CR_SUCCESS) {
615 bh_unlock_sock(sk);
616 continue;
617 }
618
619 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
620 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
621 l2cap_build_conf_req(sk, buf), buf);
622 l2cap_pi(sk)->num_conf_req++;
623 }
624
625 bh_unlock_sock(sk);
626 }
627
628 read_unlock(&l->lock);
629
630 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
631 bh_lock_sock(tmp1->sk);
632 __l2cap_sock_close(tmp1->sk, ECONNRESET);
633 bh_unlock_sock(tmp1->sk);
634 list_del(&tmp1->list);
635 kfree(tmp1);
636 }
637 }
638
/* ACL link is up: mark connectionless/raw channels connected immediately
 * and start channel establishment for connection-oriented ones. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No L2CAP-level handshake needed for these. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
664
665 /* Notify sockets that we cannot guaranty reliability anymore */
666 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
667 {
668 struct l2cap_chan_list *l = &conn->chan_list;
669 struct sock *sk;
670
671 BT_DBG("conn %p", conn);
672
673 read_lock(&l->lock);
674
675 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
676 if (l2cap_pi(sk)->force_reliable)
677 sk->sk_err = err;
678 }
679
680 read_unlock(&l->lock);
681 }
682
683 static void l2cap_info_timeout(unsigned long arg)
684 {
685 struct l2cap_conn *conn = (void *) arg;
686
687 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
688 conn->info_ident = 0;
689
690 l2cap_conn_start(conn);
691 }
692
/* Create and attach the L2CAP connection object for @hcon.
 * Returns the existing object if one is already attached, NULL on
 * allocation failure or when @status reports an HCI error. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason (0x13: remote user terminated). */
	conn->disc_reason = 0x13;

	return conn;
}
725
/* Tear down the L2CAP connection attached to @hcon: delete every channel
 * with error @err, stop the info timer and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
752
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
760
761 /* ---- Socket interface ---- */
762 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
763 {
764 struct sock *sk;
765 struct hlist_node *node;
766 sk_for_each(sk, node, &l2cap_sk_list.head)
767 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
768 goto found;
769 sk = NULL;
770 found:
771 return sk;
772 }
773
/* Find socket with psm and source bdaddr.
 * Returns closest match: an exact source-address match wins, otherwise
 * a socket bound to BDADDR_ANY (wildcard) is used.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke out early, i.e. an
	 * exact match was found; otherwise fall back to the wildcard. */
	return node ? sk : sk1;
}
803
/* Socket destructor: free any skbs still sitting on the queues. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
811
/* Shut down a listening socket: close every not-yet-accepted child, then
 * mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
825
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Unlinks it from the global socket list and drops the list's reference.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
841
/* Close a socket according to its state (caller holds the socket lock):
 * listeners drain their accept queue; established channels send a
 * disconnect request; half-open incoming channels are refused; everything
 * else is deleted or just zapped. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Bound the disconnect handshake with a timer. */
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Peer's connect request is still pending: refuse
			 * it with a result matching why we never accepted. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			sk->sk_state = BT_DISCONN;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
896
/* Must be called on unlocked socket.
 * Full close: stop the timer, run the state-dependent close under the
 * socket lock, then reap the socket if it became a zapped orphan. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
906
/* Initialise the L2CAP-specific part of a new socket.
 * Children of a listening socket inherit its configuration; fresh sockets
 * get the protocol defaults (ERTM for SOCK_STREAM unless disabled). */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
		pi->flushable = l2cap_pi(parent)->flushable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			/* STATE2_DEVICE marks ERTM as mandatory here. */
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
		pi->flushable = BT_FLUSHABLE_OFF;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
954
/* Protocol descriptor; obj_size makes sk_alloc() reserve the l2cap_pinfo
 * that l2cap_pi() casts to. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
960
/* Allocate and set up a bare L2CAP socket: destructor, connect timeout,
 * state timer, and linkage into the global socket list.
 * Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
985
986 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
987 int kern)
988 {
989 struct sock *sk;
990
991 BT_DBG("sock %p", sock);
992
993 sock->state = SS_UNCONNECTED;
994
995 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
996 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
997 return -ESOCKTNOSUPPORT;
998
999 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
1000 return -EPERM;
1001
1002 sock->ops = &l2cap_sock_ops;
1003
1004 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
1005 if (!sk)
1006 return -ENOMEM;
1007
1008 l2cap_sock_init(sk, NULL);
1009 return 0;
1010 }
1011
/* bind(2) backend: validate the PSM (odd, low bit of upper byte clear;
 * well-known PSMs need CAP_NET_BIND_SERVICE), reject duplicate bindings,
 * record the source address and move the socket to BT_BOUND. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs leave the rest zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Binding to a fixed CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm) {
		__u16 psm = __le16_to_cpu(la.l2_psm);

		/* PSM must be odd and lsb of upper byte must be 0 */
		if ((psm & 0x0101) != 0x0001) {
			err = -EINVAL;
			goto done;
		}

		/* Restrict usage of well-known PSMs */
		if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
			err = -EACCES;
			goto done;
		}
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm   = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) get SDP security level. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1075
/* Establish (or reuse) the ACL link to the destination and attach this
 * socket as a channel on it; starts the L2CAP handshake immediately if
 * the link is already up.  Returns 0 or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	auth_type = l2cap_get_auth_type(sk);

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		/* Drop the reference taken by hci_connect(). */
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Connectionless/raw: connected as soon as security
			 * allows, no channel handshake needed. */
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(sk))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1135
/* connect(2) backend: validate the address, PSM and channel mode, start
 * the connection and (unless non-blocking) wait for BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
		addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs leave the rest zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	/* Connecting to a fixed CID is not supported here. */
	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
					sk->sk_type != SOCK_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1220
/* listen(2) backend: only bound SEQPACKET/STREAM sockets in a supported
 * mode may listen; an unbound PSM is auto-assigned from the dynamic
 * range (0x1001..0x10ff, odd values only). */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Pick the first free odd PSM in the dynamic range. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1279
/* Accept a connection on a listening L2CAP socket, blocking (subject to
 * the socket's receive timeout) until a fully established child socket
 * is available. Returns 0 with newsock connected, or a negative errno.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested lock class: the parent may be locked while children are
	 * locked elsewhere in the stack. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the connection can be
		 * delivered, then revalidate the listening state. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1335
1336 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1337 {
1338 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1339 struct sock *sk = sock->sk;
1340
1341 BT_DBG("sock %p, sk %p", sock, sk);
1342
1343 addr->sa_family = AF_BLUETOOTH;
1344 *len = sizeof(struct sockaddr_l2);
1345
1346 if (peer) {
1347 la->l2_psm = l2cap_pi(sk)->psm;
1348 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1349 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1350 } else {
1351 la->l2_psm = l2cap_pi(sk)->sport;
1352 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1353 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1354 }
1355
1356 return 0;
1357 }
1358
/* Wait until the peer has acknowledged all outstanding ERTM I-frames,
 * a signal arrives, or a socket error is set. Called with the socket
 * locked; the lock is dropped while sleeping and re-taken afterwards.
 * Returns 0 on success or a negative errno.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the polling interval if the previous sleep
		 * ran to completion. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1389
/* ERTM monitor timer callback: fires while waiting for the peer to
 * answer a poll. Tears the channel down once retry_count reaches the
 * remote's max_tx limit, otherwise polls again with the P bit set.
 * Runs in timer (softirq) context, hence bh_lock_sock.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1409
/* ERTM retransmission timer callback: an I-frame went unacknowledged,
 * so start the poll sequence (send RR/RNR with the P bit, switch to the
 * monitor timer and wait for an F-bit response). Timer context.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	/* Block further transmission until the F bit comes back. */
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1425
1426 static void l2cap_drop_acked_frames(struct sock *sk)
1427 {
1428 struct sk_buff *skb;
1429
1430 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1431 l2cap_pi(sk)->unacked_frames) {
1432 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1433 break;
1434
1435 skb = skb_dequeue(TX_QUEUE(sk));
1436 kfree_skb(skb);
1437
1438 l2cap_pi(sk)->unacked_frames--;
1439 }
1440
1441 if (!l2cap_pi(sk)->unacked_frames)
1442 del_timer(&l2cap_pi(sk)->retrans_timer);
1443 }
1444
1445 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1446 {
1447 struct l2cap_pinfo *pi = l2cap_pi(sk);
1448 struct hci_conn *hcon = pi->conn->hcon;
1449 u16 flags;
1450
1451 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1452
1453 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
1454 flags = ACL_START_NO_FLUSH;
1455 else
1456 flags = ACL_START;
1457
1458 hci_send_acl(hcon, skb, flags);
1459 }
1460
1461 static void l2cap_streaming_send(struct sock *sk)
1462 {
1463 struct sk_buff *skb;
1464 struct l2cap_pinfo *pi = l2cap_pi(sk);
1465 u16 control, fcs;
1466
1467 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1468 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1469 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1470 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1471
1472 if (pi->fcs == L2CAP_FCS_CRC16) {
1473 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1474 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1475 }
1476
1477 l2cap_do_send(sk, skb);
1478
1479 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1480 }
1481 }
1482
1483 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1484 {
1485 struct l2cap_pinfo *pi = l2cap_pi(sk);
1486 struct sk_buff *skb, *tx_skb;
1487 u16 control, fcs;
1488
1489 skb = skb_peek(TX_QUEUE(sk));
1490 if (!skb)
1491 return;
1492
1493 do {
1494 if (bt_cb(skb)->tx_seq == tx_seq)
1495 break;
1496
1497 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1498 return;
1499
1500 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1501
1502 if (pi->remote_max_tx &&
1503 bt_cb(skb)->retries == pi->remote_max_tx) {
1504 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1505 return;
1506 }
1507
1508 tx_skb = skb_clone(skb, GFP_ATOMIC);
1509 bt_cb(skb)->retries++;
1510 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1511
1512 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1513 control |= L2CAP_CTRL_FINAL;
1514 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1515 }
1516
1517 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1518 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1519
1520 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1521
1522 if (pi->fcs == L2CAP_FCS_CRC16) {
1523 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1524 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1525 }
1526
1527 l2cap_do_send(sk, tx_skb);
1528 }
1529
1530 static int l2cap_ertm_send(struct sock *sk)
1531 {
1532 struct sk_buff *skb, *tx_skb;
1533 struct l2cap_pinfo *pi = l2cap_pi(sk);
1534 u16 control, fcs;
1535 int nsent = 0;
1536
1537 if (sk->sk_state != BT_CONNECTED)
1538 return -ENOTCONN;
1539
1540 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1541
1542 if (pi->remote_max_tx &&
1543 bt_cb(skb)->retries == pi->remote_max_tx) {
1544 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1545 break;
1546 }
1547
1548 tx_skb = skb_clone(skb, GFP_ATOMIC);
1549
1550 bt_cb(skb)->retries++;
1551
1552 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1553 control &= L2CAP_CTRL_SAR;
1554
1555 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1556 control |= L2CAP_CTRL_FINAL;
1557 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1558 }
1559 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1560 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1561 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1562
1563
1564 if (pi->fcs == L2CAP_FCS_CRC16) {
1565 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1566 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1567 }
1568
1569 l2cap_do_send(sk, tx_skb);
1570
1571 __mod_retrans_timer();
1572
1573 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1574 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1575
1576 pi->unacked_frames++;
1577 pi->frames_sent++;
1578
1579 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1580 sk->sk_send_head = NULL;
1581 else
1582 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1583
1584 nsent++;
1585 }
1586
1587 return nsent;
1588 }
1589
1590 static int l2cap_retransmit_frames(struct sock *sk)
1591 {
1592 struct l2cap_pinfo *pi = l2cap_pi(sk);
1593 int ret;
1594
1595 if (!skb_queue_empty(TX_QUEUE(sk)))
1596 sk->sk_send_head = TX_QUEUE(sk)->next;
1597
1598 pi->next_tx_seq = pi->expected_ack_seq;
1599 ret = l2cap_ertm_send(sk);
1600 return ret;
1601 }
1602
/* Acknowledge received I-frames: send an RNR S-frame when locally busy,
 * otherwise try to piggy-back the acknowledgement on pending outgoing
 * I-frames, and only emit an explicit RR S-frame when nothing was sent.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* I-frames carry ReqSeq, so sending at least one already acks. */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1623
1624 static void l2cap_send_srejtail(struct sock *sk)
1625 {
1626 struct srej_list *tail;
1627 u16 control;
1628
1629 control = L2CAP_SUPER_SELECT_REJECT;
1630 control |= L2CAP_CTRL_FINAL;
1631
1632 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1633 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1634
1635 l2cap_send_sframe(l2cap_pi(sk), control);
1636 }
1637
/* Copy 'len' bytes of user data from msg into skb: the first 'count'
 * bytes go into the primary buffer, the remainder is chained as
 * continuation fragments of at most conn->mtu bytes each (no L2CAP
 * header). Returns bytes consumed or a negative errno; on error the
 * caller owns and frees the whole skb chain built so far.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1669
1670 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1671 {
1672 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1673 struct sk_buff *skb;
1674 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1675 struct l2cap_hdr *lh;
1676
1677 BT_DBG("sk %p len %d", sk, (int)len);
1678
1679 count = min_t(unsigned int, (conn->mtu - hlen), len);
1680 skb = bt_skb_send_alloc(sk, count + hlen,
1681 msg->msg_flags & MSG_DONTWAIT, &err);
1682 if (!skb)
1683 return ERR_PTR(err);
1684
1685 /* Create L2CAP header */
1686 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1687 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1688 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1689 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1690
1691 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1692 if (unlikely(err < 0)) {
1693 kfree_skb(skb);
1694 return ERR_PTR(err);
1695 }
1696 return skb;
1697 }
1698
1699 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1700 {
1701 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1702 struct sk_buff *skb;
1703 int err, count, hlen = L2CAP_HDR_SIZE;
1704 struct l2cap_hdr *lh;
1705
1706 BT_DBG("sk %p len %d", sk, (int)len);
1707
1708 count = min_t(unsigned int, (conn->mtu - hlen), len);
1709 skb = bt_skb_send_alloc(sk, count + hlen,
1710 msg->msg_flags & MSG_DONTWAIT, &err);
1711 if (!skb)
1712 return ERR_PTR(err);
1713
1714 /* Create L2CAP header */
1715 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1716 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1717 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1718
1719 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1720 if (unlikely(err < 0)) {
1721 kfree_skb(skb);
1722 return ERR_PTR(err);
1723 }
1724 return skb;
1725 }
1726
/* Build a single ERTM/Streaming I-frame PDU: basic L2CAP header, 16-bit
 * control field, an optional 16-bit SDU length (present only on the
 * first PDU of a segmented SDU) and, when FCS is enabled, two trailing
 * bytes reserved for the CRC16 that is filled in at transmit time.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real CRC is computed just before send. */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1771
/* Segment an SDU larger than the remote MPS into a chain of I-frame
 * PDUs (SAR start/continue/end), queue them on the socket's TX queue
 * and return the total payload size queued, or a negative errno.
 * PDUs are built on a private queue first so that nothing ends up on
 * the TX queue if any allocation fails part-way through.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The start PDU additionally carries the total SDU length. */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1817
/* sendmsg() for L2CAP sockets. Dispatches on socket type and channel
 * mode: connectionless PDUs for SOCK_DGRAM, a single basic-mode PDU,
 * or (for ERTM/Streaming) segmentation into I-frames followed by an
 * immediate transmit attempt. Returns bytes accepted or a negative
 * errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* While waiting for an F-bit response from a busy
			 * peer, leave frames queued; they go out later. */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
1919
/* recvmsg() for L2CAP sockets. A first read on a deferred-setup socket
 * (BT_CONNECT2 + defer_setup) completes the connection handshake by
 * sending the pending connect response and the first config request,
 * returning 0. Otherwise delegates to the generic BT receive paths.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* Config request already sent earlier: nothing more to do. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1961
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		/* Pre-fill with current values so a short write from
		 * userspace leaves unspecified fields unchanged. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		/* NOTE(review): when the mode above is invalid, err is set
		 * to -EINVAL but the remaining options are still applied
		 * below — confirm whether callers depend on this. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Highest requested link-mode flag wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2046
/* SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP,
 * BT_FLUSHABLE); SOL_L2CAP requests are routed to the legacy handler.
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before/while listening. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	case BT_FLUSHABLE:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt > BT_FLUSHABLE_OFF) {
			err = -EINVAL;
			break;
		}

		if (opt == BT_FLUSHABLE_OFF) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			/* proceed further only when we have l2cap_conn and
			   No Flush support in the LM */
			if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
				err = -EINVAL;
				break;
			}
		}

		l2cap_pi(sk)->flushable = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2135
2136 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2137 {
2138 struct sock *sk = sock->sk;
2139 struct l2cap_options opts;
2140 struct l2cap_conninfo cinfo;
2141 int len, err = 0;
2142 u32 opt;
2143
2144 BT_DBG("sk %p", sk);
2145
2146 if (get_user(len, optlen))
2147 return -EFAULT;
2148
2149 lock_sock(sk);
2150
2151 switch (optname) {
2152 case L2CAP_OPTIONS:
2153 opts.imtu = l2cap_pi(sk)->imtu;
2154 opts.omtu = l2cap_pi(sk)->omtu;
2155 opts.flush_to = l2cap_pi(sk)->flush_to;
2156 opts.mode = l2cap_pi(sk)->mode;
2157 opts.fcs = l2cap_pi(sk)->fcs;
2158 opts.max_tx = l2cap_pi(sk)->max_tx;
2159 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2160
2161 len = min_t(unsigned int, len, sizeof(opts));
2162 if (copy_to_user(optval, (char *) &opts, len))
2163 err = -EFAULT;
2164
2165 break;
2166
2167 case L2CAP_LM:
2168 switch (l2cap_pi(sk)->sec_level) {
2169 case BT_SECURITY_LOW:
2170 opt = L2CAP_LM_AUTH;
2171 break;
2172 case BT_SECURITY_MEDIUM:
2173 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2174 break;
2175 case BT_SECURITY_HIGH:
2176 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2177 L2CAP_LM_SECURE;
2178 break;
2179 default:
2180 opt = 0;
2181 break;
2182 }
2183
2184 if (l2cap_pi(sk)->role_switch)
2185 opt |= L2CAP_LM_MASTER;
2186
2187 if (l2cap_pi(sk)->force_reliable)
2188 opt |= L2CAP_LM_RELIABLE;
2189
2190 if (put_user(opt, (u32 __user *) optval))
2191 err = -EFAULT;
2192 break;
2193
2194 case L2CAP_CONNINFO:
2195 if (sk->sk_state != BT_CONNECTED &&
2196 !(sk->sk_state == BT_CONNECT2 &&
2197 bt_sk(sk)->defer_setup)) {
2198 err = -ENOTCONN;
2199 break;
2200 }
2201
2202 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2203 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2204
2205 len = min_t(unsigned int, len, sizeof(cinfo));
2206 if (copy_to_user(optval, (char *) &cinfo, len))
2207 err = -EFAULT;
2208
2209 break;
2210
2211 default:
2212 err = -ENOPROTOOPT;
2213 break;
2214 }
2215
2216 release_sock(sk);
2217 return err;
2218 }
2219
/* SOL_BLUETOOTH getsockopt handler (BT_SECURITY, BT_DEFER_SETUP,
 * BT_FLUSHABLE); SOL_L2CAP requests are routed to the legacy handler.
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* NOTE(review): only .level is filled; if struct
		 * bt_security grows other members/padding, they should
		 * be zeroed before the copy-out — verify. */
		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	case BT_FLUSHABLE:
		if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2280
/* Shut down an L2CAP socket: for ERTM channels first wait for all
 * outstanding frames to be acknowledged, then close the channel and,
 * if SO_LINGER is set, wait for it to reach BT_CLOSED.
 * Returns 0 or a negative errno.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Report a pending socket error if nothing else failed. */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2311
2312 static int l2cap_sock_release(struct socket *sock)
2313 {
2314 struct sock *sk = sock->sk;
2315 int err;
2316
2317 BT_DBG("sock %p, sk %p", sock, sk);
2318
2319 if (!sk)
2320 return 0;
2321
2322 err = l2cap_sock_shutdown(sock, 2);
2323
2324 sock_orphan(sk);
2325 l2cap_sock_kill(sk);
2326 return err;
2327 }
2328
2329 static void l2cap_chan_ready(struct sock *sk)
2330 {
2331 struct sock *parent = bt_sk(sk)->parent;
2332
2333 BT_DBG("sk %p, parent %p", sk, parent);
2334
2335 l2cap_pi(sk)->conf_state = 0;
2336 l2cap_sock_clear_timer(sk);
2337
2338 if (!parent) {
2339 /* Outgoing channel.
2340 * Wake up socket sleeping on connect.
2341 */
2342 sk->sk_state = BT_CONNECTED;
2343 sk->sk_state_change(sk);
2344 } else {
2345 /* Incoming channel.
2346 * Wake up socket sleeping on accept.
2347 */
2348 parent->sk_data_ready(parent, 0);
2349 }
2350 }
2351
2352 /* Copy frame to all raw sockets on that connection */
2353 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2354 {
2355 struct l2cap_chan_list *l = &conn->chan_list;
2356 struct sk_buff *nskb;
2357 struct sock *sk;
2358
2359 BT_DBG("conn %p", conn);
2360
2361 read_lock(&l->lock);
2362 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2363 if (sk->sk_type != SOCK_RAW)
2364 continue;
2365
2366 /* Don't send frame to the socket it came from */
2367 if (skb->sk == sk)
2368 continue;
2369 nskb = skb_clone(skb, GFP_ATOMIC);
2370 if (!nskb)
2371 continue;
2372
2373 if (sock_queue_rcv_skb(sk, nskb))
2374 kfree_skb(nskb);
2375 }
2376 read_unlock(&l->lock);
2377 }
2378
/* ---- L2CAP signalling commands ---- */

/* Build a signalling-channel command skb: L2CAP header + command header
 * followed by dlen bytes of payload, with payload beyond the first
 * conn->mtu bytes chained as continuation fragments.
 * Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* As much payload as fits after the two headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
2438
/* Decode one configuration option at *ptr, returning its type, length
 * and value and advancing *ptr past it. Values of the standard widths
 * (1/2/4 bytes) are decoded little-endian; anything else is returned
 * as a pointer to the raw bytes. Returns the total encoded length.
 *
 * NOTE(review): opt->len comes from the remote peer and is not bounds
 * checked here — confirm the caller limits iteration to the received
 * packet length.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2471
2472 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2473 {
2474 struct l2cap_conf_opt *opt = *ptr;
2475
2476 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2477
2478 opt->type = type;
2479 opt->len = len;
2480
2481 switch (len) {
2482 case 1:
2483 *((u8 *) opt->val) = val;
2484 break;
2485
2486 case 2:
2487 put_unaligned_le16(val, opt->val);
2488 break;
2489
2490 case 4:
2491 put_unaligned_le32(val, opt->val);
2492 break;
2493
2494 default:
2495 memcpy(opt->val, (void *) val, len);
2496 break;
2497 }
2498
2499 *ptr += L2CAP_CONF_OPT_SIZE + len;
2500 }
2501
/* Delayed-acknowledgement timer callback: flush a pending ERTM ack.
 * Runs in timer (softirq) context, hence bh_lock_sock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2510
/* Initialize per-channel ERTM state: sequence counters, the three ERTM
 * timers (retransmission, monitor, delayed ack), the SREJ/busy queues
 * and the busy-state work item; install the ERTM backlog receive hook.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2533
2534 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2535 {
2536 switch (mode) {
2537 case L2CAP_MODE_STREAMING:
2538 case L2CAP_MODE_ERTM:
2539 if (l2cap_mode_supported(mode, remote_feat_mask))
2540 return mode;
2541 /* fall through */
2542 default:
2543 return L2CAP_MODE_BASIC;
2544 }
2545 }
2546
/* Build an outgoing Configure Request for this channel into @data and
 * return the number of bytes written.
 *
 * On the very first exchange (no requests or responses seen yet) the
 * requested mode may be downgraded through l2cap_select_mode() when the
 * remote's feature mask lacks ERTM/streaming support. The final mode
 * then determines which options (MTU, RFC, FCS) are appended.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	/* rfc.mode keeps the originally requested mode for selection. */
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only once per configuration cycle. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE: mode is mandated locally and must not
		 * be negotiated away. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise the MTU when it differs from the default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		/* Peers without ERTM/streaming need no RFC option at all. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		/* Clamp MPS so a maximum-size PDU (payload plus 10 bytes of
		 * L2CAP/ERTM overhead) still fits in the HCI connection MTU. */
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Request "no FCS" when we or the remote asked for it. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2648
2649 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2650 {
2651 struct l2cap_pinfo *pi = l2cap_pi(sk);
2652 struct l2cap_conf_rsp *rsp = data;
2653 void *ptr = rsp->data;
2654 void *req = pi->conf_req;
2655 int len = pi->conf_len;
2656 int type, hint, olen;
2657 unsigned long val;
2658 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2659 u16 mtu = L2CAP_DEFAULT_MTU;
2660 u16 result = L2CAP_CONF_SUCCESS;
2661
2662 BT_DBG("sk %p", sk);
2663
2664 while (len >= L2CAP_CONF_OPT_SIZE) {
2665 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2666
2667 hint = type & L2CAP_CONF_HINT;
2668 type &= L2CAP_CONF_MASK;
2669
2670 switch (type) {
2671 case L2CAP_CONF_MTU:
2672 mtu = val;
2673 break;
2674
2675 case L2CAP_CONF_FLUSH_TO:
2676 pi->flush_to = val;
2677 break;
2678
2679 case L2CAP_CONF_QOS:
2680 break;
2681
2682 case L2CAP_CONF_RFC:
2683 if (olen == sizeof(rfc))
2684 memcpy(&rfc, (void *) val, olen);
2685 break;
2686
2687 case L2CAP_CONF_FCS:
2688 if (val == L2CAP_FCS_NONE)
2689 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2690
2691 break;
2692
2693 default:
2694 if (hint)
2695 break;
2696
2697 result = L2CAP_CONF_UNKNOWN;
2698 *((u8 *) ptr++) = type;
2699 break;
2700 }
2701 }
2702
2703 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2704 goto done;
2705
2706 switch (pi->mode) {
2707 case L2CAP_MODE_STREAMING:
2708 case L2CAP_MODE_ERTM:
2709 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2710 pi->mode = l2cap_select_mode(rfc.mode,
2711 pi->conn->feat_mask);
2712 break;
2713 }
2714
2715 if (pi->mode != rfc.mode)
2716 return -ECONNREFUSED;
2717
2718 break;
2719 }
2720
2721 done:
2722 if (pi->mode != rfc.mode) {
2723 result = L2CAP_CONF_UNACCEPT;
2724 rfc.mode = pi->mode;
2725
2726 if (pi->num_conf_rsp == 1)
2727 return -ECONNREFUSED;
2728
2729 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2730 sizeof(rfc), (unsigned long) &rfc);
2731 }
2732
2733
2734 if (result == L2CAP_CONF_SUCCESS) {
2735 /* Configure output options and let the other side know
2736 * which ones we don't like. */
2737
2738 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2739 result = L2CAP_CONF_UNACCEPT;
2740 else {
2741 pi->omtu = mtu;
2742 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2743 }
2744 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2745
2746 switch (rfc.mode) {
2747 case L2CAP_MODE_BASIC:
2748 pi->fcs = L2CAP_FCS_NONE;
2749 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2750 break;
2751
2752 case L2CAP_MODE_ERTM:
2753 pi->remote_tx_win = rfc.txwin_size;
2754 pi->remote_max_tx = rfc.max_transmit;
2755
2756 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2757 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2758
2759 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2760
2761 rfc.retrans_timeout =
2762 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2763 rfc.monitor_timeout =
2764 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2765
2766 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2767
2768 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2769 sizeof(rfc), (unsigned long) &rfc);
2770
2771 break;
2772
2773 case L2CAP_MODE_STREAMING:
2774 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2775 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2776
2777 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2778
2779 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2780
2781 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2782 sizeof(rfc), (unsigned long) &rfc);
2783
2784 break;
2785
2786 default:
2787 result = L2CAP_CONF_UNACCEPT;
2788
2789 memset(&rfc, 0, sizeof(rfc));
2790 rfc.mode = pi->mode;
2791 }
2792
2793 if (result == L2CAP_CONF_SUCCESS)
2794 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2795 }
2796 rsp->scid = cpu_to_le16(pi->dcid);
2797 rsp->result = cpu_to_le16(result);
2798 rsp->flags = cpu_to_le16(0x0000);
2799
2800 return ptr - data;
2801 }
2802
2803 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2804 {
2805 struct l2cap_pinfo *pi = l2cap_pi(sk);
2806 struct l2cap_conf_req *req = data;
2807 void *ptr = req->data;
2808 int type, olen;
2809 unsigned long val;
2810 struct l2cap_conf_rfc rfc;
2811
2812 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2813
2814 while (len >= L2CAP_CONF_OPT_SIZE) {
2815 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2816
2817 switch (type) {
2818 case L2CAP_CONF_MTU:
2819 if (val < L2CAP_DEFAULT_MIN_MTU) {
2820 *result = L2CAP_CONF_UNACCEPT;
2821 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2822 } else
2823 pi->imtu = val;
2824 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2825 break;
2826
2827 case L2CAP_CONF_FLUSH_TO:
2828 pi->flush_to = val;
2829 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2830 2, pi->flush_to);
2831 break;
2832
2833 case L2CAP_CONF_RFC:
2834 if (olen == sizeof(rfc))
2835 memcpy(&rfc, (void *)val, olen);
2836
2837 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2838 rfc.mode != pi->mode)
2839 return -ECONNREFUSED;
2840
2841 pi->fcs = 0;
2842
2843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2844 sizeof(rfc), (unsigned long) &rfc);
2845 break;
2846 }
2847 }
2848
2849 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2850 return -ECONNREFUSED;
2851
2852 pi->mode = rfc.mode;
2853
2854 if (*result == L2CAP_CONF_SUCCESS) {
2855 switch (rfc.mode) {
2856 case L2CAP_MODE_ERTM:
2857 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2858 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2859 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2860 break;
2861 case L2CAP_MODE_STREAMING:
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2863 }
2864 }
2865
2866 req->dcid = cpu_to_le16(pi->dcid);
2867 req->flags = cpu_to_le16(0x0000);
2868
2869 return ptr - data;
2870 }
2871
2872 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2873 {
2874 struct l2cap_conf_rsp *rsp = data;
2875 void *ptr = rsp->data;
2876
2877 BT_DBG("sk %p", sk);
2878
2879 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2880 rsp->result = cpu_to_le16(result);
2881 rsp->flags = cpu_to_le16(flags);
2882
2883 return ptr - data;
2884 }
2885
2886 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2887 {
2888 struct l2cap_pinfo *pi = l2cap_pi(sk);
2889 int type, olen;
2890 unsigned long val;
2891 struct l2cap_conf_rfc rfc;
2892
2893 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2894
2895 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2896 return;
2897
2898 while (len >= L2CAP_CONF_OPT_SIZE) {
2899 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2900
2901 switch (type) {
2902 case L2CAP_CONF_RFC:
2903 if (olen == sizeof(rfc))
2904 memcpy(&rfc, (void *)val, olen);
2905 goto done;
2906 }
2907 }
2908
2909 done:
2910 switch (rfc.mode) {
2911 case L2CAP_MODE_ERTM:
2912 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2913 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2914 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2915 break;
2916 case L2CAP_MODE_STREAMING:
2917 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2918 }
2919 }
2920
2921 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2922 {
2923 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2924
2925 if (rej->reason != 0x0000)
2926 return 0;
2927
2928 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2929 cmd->ident == conn->info_ident) {
2930 del_timer(&conn->info_timer);
2931
2932 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2933 conn->info_ident = 0;
2934
2935 l2cap_conn_start(conn);
2936 }
2937
2938 return 0;
2939 }
2940
/* Handle an incoming Connection Request.
 *
 * Looks up a listening socket for the requested PSM, performs the
 * link-security check, allocates and initializes a child socket and
 * channel, and replies with a Connection Response carrying success,
 * pending or an error result. If the feature-mask exchange with the
 * remote has not completed yet, an Information Request is also issued.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Inherit settings from the listening parent and bind the
	 * channel's addresses and CIDs. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Defer the final answer to user space. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange still outstanding: answer "pending". */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away. */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}
3068
/* Handle an incoming Connection Response.
 *
 * The channel is found either by our source CID (scid in the response)
 * or, while the CID is still unassigned, by the request identifier. On
 * success the channel enters BT_CONFIG and the first Configure Request
 * is sent; a pending result only marks the channel; any other result
 * tears the channel down (deferred via a short timer when the socket is
 * currently locked by user space).
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* Lookup returns the socket locked; unlocked at the end. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Send the initial Configure Request only once. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3130
3131 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3132 {
3133 /* FCS is enabled only in ERTM or streaming mode, if one or both
3134 * sides request it.
3135 */
3136 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3137 pi->fcs = L2CAP_FCS_NONE;
3138 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3139 pi->fcs = L2CAP_FCS_CRC16;
3140 }
3141
/* Handle an incoming Configure Request.
 *
 * Options may arrive split over several requests (continuation flag in
 * req->flags); they are accumulated in pi->conf_req until the final
 * fragment, then parsed as a whole and answered with a Configure
 * Response. When both configuration directions are done the channel
 * becomes BT_CONNECTED; otherwise our own Configure Request is sent if
 * it has not gone out yet.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Configuration only makes sense in BT_CONFIG; reject with
	 * "invalid CID" otherwise. */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unacceptable mode: give up on the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up. */
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3232
3233 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3234 {
3235 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3236 u16 scid, flags, result;
3237 struct sock *sk;
3238 int len = cmd->len - sizeof(*rsp);
3239
3240 scid = __le16_to_cpu(rsp->scid);
3241 flags = __le16_to_cpu(rsp->flags);
3242 result = __le16_to_cpu(rsp->result);
3243
3244 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3245 scid, flags, result);
3246
3247 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3248 if (!sk)
3249 return 0;
3250
3251 switch (result) {
3252 case L2CAP_CONF_SUCCESS:
3253 l2cap_conf_rfc_get(sk, rsp->data, len);
3254 break;
3255
3256 case L2CAP_CONF_UNACCEPT:
3257 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3258 char req[64];
3259
3260 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3261 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3262 goto done;
3263 }
3264
3265 /* throw out any old stored conf requests */
3266 result = L2CAP_CONF_SUCCESS;
3267 len = l2cap_parse_conf_rsp(sk, rsp->data,
3268 len, req, &result);
3269 if (len < 0) {
3270 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3271 goto done;
3272 }
3273
3274 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3275 L2CAP_CONF_REQ, len, req);
3276 l2cap_pi(sk)->num_conf_req++;
3277 if (result != L2CAP_CONF_SUCCESS)
3278 goto done;
3279 break;
3280 }
3281
3282 default:
3283 sk->sk_err = ECONNRESET;
3284 l2cap_sock_set_timer(sk, HZ * 5);
3285 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3286 goto done;
3287 }
3288
3289 if (flags & 0x01)
3290 goto done;
3291
3292 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3293
3294 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3295 set_default_fcs(l2cap_pi(sk));
3296
3297 sk->sk_state = BT_CONNECTED;
3298 l2cap_pi(sk)->next_tx_seq = 0;
3299 l2cap_pi(sk)->expected_tx_seq = 0;
3300 __skb_queue_head_init(TX_QUEUE(sk));
3301 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3302 l2cap_ertm_init(sk);
3303
3304 l2cap_chan_ready(sk);
3305 }
3306
3307 done:
3308 bh_unlock_sock(sk);
3309 return 0;
3310 }
3311
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, shut the socket down, and remove the channel.
 * Removal is deferred via a short timer when the socket is currently
 * locked by user space.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The request's dcid is our local CID. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3349
/* Handle an incoming Disconnection Response: the remote confirmed our
 * disconnect, so remove the channel and kill the socket. Removal is
 * deferred via a short timer when the socket is locked by user space.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3380
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries; any other information type is reported as
 * "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte response header plus 4-byte feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte response header plus 8-byte fixed-channel map. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3420
/* Handle an incoming Information Response.
 *
 * Records the remote feature mask; if the remote supports fixed
 * channels, follows up with a fixed-channel query. Once the exchange
 * is complete (or failed), marks the feature discovery as done and
 * starts any channels that were waiting on it.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote could not answer: proceed without its features. */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* One more round trip for the fixed-channel map. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3468
/* Process all signaling commands packed into one frame on the L2CAP
 * signaling channel, dispatching each to its handler. Any handler
 * error is answered with a Command Reject. The skb is consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signaling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length running past the frame, or a zero identifier,
		 * means the frame is corrupted: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3558
/* Verify the CRC16 frame check sequence on a received frame.
 *
 * The FCS covers the L2CAP basic header plus the 2-byte control field
 * (hdr_size bytes that sit just before skb->data — presumably already
 * pulled by the caller; TODO confirm) and the remaining payload. The
 * trailing 2-byte FCS is trimmed off skb->len first; the bytes stay
 * readable past the new length, so the read at skb->data + skb->len
 * deliberately picks up the received FCS. Returns 0 on success or when
 * no FCS is in use, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
3574
/* After leaving local-busy (or on poll), tell the remote our state:
 * send RNR while still locally busy, retransmit if the remote was
 * busy, push pending I-frames, and fall back to an RR frame when
 * nothing else carried the acknowledgment.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	/* Every frame we send acknowledges up to buffer_seq. */
	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing went out that carries the ack: send an explicit RR. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3601
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by the frame's distance from buffer_seq (sequence
 * numbers are modulo 64, so offsets are normalized into [0, 63]).
 * Returns -EINVAL when a frame with the same tx_seq is already queued
 * (duplicate), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		/* Empty queue: trivial append. */
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Distance of the new frame from the reassembly point. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* Found the first queued frame that is farther away:
		 * insert in front of it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Farther than everything queued: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3644
3645 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3646 {
3647 struct l2cap_pinfo *pi = l2cap_pi(sk);
3648 struct sk_buff *_skb;
3649 int err;
3650
3651 switch (control & L2CAP_CTRL_SAR) {
3652 case L2CAP_SDU_UNSEGMENTED:
3653 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3654 goto drop;
3655
3656 err = sock_queue_rcv_skb(sk, skb);
3657 if (!err)
3658 return err;
3659
3660 break;
3661
3662 case L2CAP_SDU_START:
3663 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3664 goto drop;
3665
3666 pi->sdu_len = get_unaligned_le16(skb->data);
3667
3668 if (pi->sdu_len > pi->imtu)
3669 goto disconnect;
3670
3671 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3672 if (!pi->sdu)
3673 return -ENOMEM;
3674
3675 /* pull sdu_len bytes only after alloc, because of Local Busy
3676 * condition we have to be sure that this will be executed
3677 * only once, i.e., when alloc does not fail */
3678 skb_pull(skb, 2);
3679
3680 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3681
3682 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3683 pi->partial_sdu_len = skb->len;
3684 break;
3685
3686 case L2CAP_SDU_CONTINUE:
3687 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3688 goto disconnect;
3689
3690 if (!pi->sdu)
3691 goto disconnect;
3692
3693 pi->partial_sdu_len += skb->len;
3694 if (pi->partial_sdu_len > pi->sdu_len)
3695 goto drop;
3696
3697 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3698
3699 break;
3700
3701 case L2CAP_SDU_END:
3702 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3703 goto disconnect;
3704
3705 if (!pi->sdu)
3706 goto disconnect;
3707
3708 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3709 pi->partial_sdu_len += skb->len;
3710
3711 if (pi->partial_sdu_len > pi->imtu)
3712 goto drop;
3713
3714 if (pi->partial_sdu_len != pi->sdu_len)
3715 goto drop;
3716
3717 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3718 }
3719
3720 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3721 if (!_skb) {
3722 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3723 return -ENOMEM;
3724 }
3725
3726 err = sock_queue_rcv_skb(sk, _skb);
3727 if (err < 0) {
3728 kfree_skb(_skb);
3729 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3730 return err;
3731 }
3732
3733 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3734 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3735
3736 kfree_skb(pi->sdu);
3737 break;
3738 }
3739
3740 kfree_skb(skb);
3741 return 0;
3742
3743 drop:
3744 kfree_skb(pi->sdu);
3745 pi->sdu = NULL;
3746
3747 disconnect:
3748 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3749 kfree_skb(skb);
3750 return 0;
3751 }
3752
/* Drain frames buffered while the receiver was in local-busy state.
 * Returns 0 when the queue is fully drained (local busy cleared),
 * -EBUSY when reassembly still cannot accept data and the frame was
 * put back at the head of the busy queue.
 */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		/* rebuild the SAR bits saved in the skb control block */
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* still busy: requeue and try again later */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We previously told the peer we were busy (RNR); now poll with
	 * RR + P-bit so the remote resumes transmission. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	/* switch from retransmission to monitor timer while waiting for
	 * the peer's F-bit response */
	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3792
/* Workqueue handler scheduled when the channel enters local-busy.
 * Repeatedly sleeps and retries pushing the buffered frames upstream
 * until they fit, the retry budget (L2CAP_LOCAL_BUSY_TRIES) is
 * exhausted (then the channel is disconnected), a signal arrives, or
 * the socket reports an error.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	/* l2cap_pinfo embeds the sock at offset 0, so the cast is valid */
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* sleep with the socket unlocked so the receive path can
		 * make progress in the meantime */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3839
/* Deliver (or buffer) one in-sequence I-frame.  If the channel is
 * already in local-busy, the frame is appended to the busy queue and a
 * drain is attempted.  Otherwise reassembly is tried directly; on
 * failure the function enters local-busy: it queues the frame, sends
 * RNR to the peer, and schedules the busy worker.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* preserve the SAR bits for later reassembly */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);


	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* tell the peer to stop transmitting (RNR S-frame) */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3878
3879 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3880 {
3881 struct l2cap_pinfo *pi = l2cap_pi(sk);
3882 struct sk_buff *_skb;
3883 int err = -EINVAL;
3884
3885 /*
3886 * TODO: We have to notify the userland if some data is lost with the
3887 * Streaming Mode.
3888 */
3889
3890 switch (control & L2CAP_CTRL_SAR) {
3891 case L2CAP_SDU_UNSEGMENTED:
3892 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3893 kfree_skb(pi->sdu);
3894 break;
3895 }
3896
3897 err = sock_queue_rcv_skb(sk, skb);
3898 if (!err)
3899 return 0;
3900
3901 break;
3902
3903 case L2CAP_SDU_START:
3904 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3905 kfree_skb(pi->sdu);
3906 break;
3907 }
3908
3909 pi->sdu_len = get_unaligned_le16(skb->data);
3910 skb_pull(skb, 2);
3911
3912 if (pi->sdu_len > pi->imtu) {
3913 err = -EMSGSIZE;
3914 break;
3915 }
3916
3917 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3918 if (!pi->sdu) {
3919 err = -ENOMEM;
3920 break;
3921 }
3922
3923 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3924
3925 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3926 pi->partial_sdu_len = skb->len;
3927 err = 0;
3928 break;
3929
3930 case L2CAP_SDU_CONTINUE:
3931 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3932 break;
3933
3934 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3935
3936 pi->partial_sdu_len += skb->len;
3937 if (pi->partial_sdu_len > pi->sdu_len)
3938 kfree_skb(pi->sdu);
3939 else
3940 err = 0;
3941
3942 break;
3943
3944 case L2CAP_SDU_END:
3945 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3946 break;
3947
3948 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3949
3950 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3951 pi->partial_sdu_len += skb->len;
3952
3953 if (pi->partial_sdu_len > pi->imtu)
3954 goto drop;
3955
3956 if (pi->partial_sdu_len == pi->sdu_len) {
3957 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3958 err = sock_queue_rcv_skb(sk, _skb);
3959 if (err < 0)
3960 kfree_skb(_skb);
3961 }
3962 err = 0;
3963
3964 drop:
3965 kfree_skb(pi->sdu);
3966 break;
3967 }
3968
3969 kfree_skb(skb);
3970 return err;
3971 }
3972
/* After a selectively-rejected frame arrives, flush the run of
 * consecutive in-order frames now sitting at the head of the SREJ
 * queue into reassembly, advancing buffer_seq_srej for each one.
 */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* stop at the first gap in the sequence */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3990
/* A frame we had selectively rejected (tx_seq) has now arrived:
 * remove its entry from the pending-SREJ list, re-sending a SREJ for
 * every entry encountered before it and rotating those entries to the
 * tail of the list.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			/* the awaited frame: forget it and stop */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}
4010
4011 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
4012 {
4013 struct l2cap_pinfo *pi = l2cap_pi(sk);
4014 struct srej_list *new;
4015 u16 control;
4016
4017 while (tx_seq != pi->expected_tx_seq) {
4018 control = L2CAP_SUPER_SELECT_REJECT;
4019 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
4020 l2cap_send_sframe(pi, control);
4021
4022 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4023 new->tx_seq = pi->expected_tx_seq;
4024 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4025 list_add_tail(&new->list, SREJ_LIST(sk));
4026 }
4027 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4028 }
4029
4030 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4031 {
4032 struct l2cap_pinfo *pi = l2cap_pi(sk);
4033 u8 tx_seq = __get_txseq(rx_control);
4034 u8 req_seq = __get_reqseq(rx_control);
4035 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
4036 int tx_seq_offset, expected_tx_seq_offset;
4037 int num_to_ack = (pi->tx_win/6) + 1;
4038 int err = 0;
4039
4040 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
4041 rx_control);
4042
4043 if (L2CAP_CTRL_FINAL & rx_control &&
4044 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4045 del_timer(&pi->monitor_timer);
4046 if (pi->unacked_frames > 0)
4047 __mod_retrans_timer();
4048 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
4049 }
4050
4051 pi->expected_ack_seq = req_seq;
4052 l2cap_drop_acked_frames(sk);
4053
4054 if (tx_seq == pi->expected_tx_seq)
4055 goto expected;
4056
4057 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
4058 if (tx_seq_offset < 0)
4059 tx_seq_offset += 64;
4060
4061 /* invalid tx_seq */
4062 if (tx_seq_offset >= pi->tx_win) {
4063 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4064 goto drop;
4065 }
4066
4067 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
4068 goto drop;
4069
4070 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4071 struct srej_list *first;
4072
4073 first = list_first_entry(SREJ_LIST(sk),
4074 struct srej_list, list);
4075 if (tx_seq == first->tx_seq) {
4076 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4077 l2cap_check_srej_gap(sk, tx_seq);
4078
4079 list_del(&first->list);
4080 kfree(first);
4081
4082 if (list_empty(SREJ_LIST(sk))) {
4083 pi->buffer_seq = pi->buffer_seq_srej;
4084 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4085 l2cap_send_ack(pi);
4086 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4087 }
4088 } else {
4089 struct srej_list *l;
4090
4091 /* duplicated tx_seq */
4092 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4093 goto drop;
4094
4095 list_for_each_entry(l, SREJ_LIST(sk), list) {
4096 if (l->tx_seq == tx_seq) {
4097 l2cap_resend_srejframe(sk, tx_seq);
4098 return 0;
4099 }
4100 }
4101 l2cap_send_srejframe(sk, tx_seq);
4102 }
4103 } else {
4104 expected_tx_seq_offset =
4105 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4106 if (expected_tx_seq_offset < 0)
4107 expected_tx_seq_offset += 64;
4108
4109 /* duplicated tx_seq */
4110 if (tx_seq_offset < expected_tx_seq_offset)
4111 goto drop;
4112
4113 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4114
4115 BT_DBG("sk %p, Enter SREJ", sk);
4116
4117 INIT_LIST_HEAD(SREJ_LIST(sk));
4118 pi->buffer_seq_srej = pi->buffer_seq;
4119
4120 __skb_queue_head_init(SREJ_QUEUE(sk));
4121 __skb_queue_head_init(BUSY_QUEUE(sk));
4122 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4123
4124 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4125
4126 l2cap_send_srejframe(sk, tx_seq);
4127
4128 del_timer(&pi->ack_timer);
4129 }
4130 return 0;
4131
4132 expected:
4133 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4134
4135 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4136 bt_cb(skb)->tx_seq = tx_seq;
4137 bt_cb(skb)->sar = sar;
4138 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4139 return 0;
4140 }
4141
4142 err = l2cap_push_rx_skb(sk, skb, rx_control);
4143 if (err < 0)
4144 return 0;
4145
4146 if (rx_control & L2CAP_CTRL_FINAL) {
4147 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4148 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4149 else
4150 l2cap_retransmit_frames(sk);
4151 }
4152
4153 __mod_ack_timer();
4154
4155 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4156 if (pi->num_acked == num_to_ack - 1)
4157 l2cap_send_ack(pi);
4158
4159 return 0;
4160
4161 drop:
4162 kfree_skb(skb);
4163 return 0;
4164 }
4165
/* Handle a Receiver-Ready (RR) S-frame: acknowledge frames up to the
 * peer's ReqSeq, then react to the P/F bits — answer a poll, clear a
 * pending retransmission request on F, or resume sending.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* peer polls us: we must answer with the F-bit set */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* F-bit answers our REJ: retransmit unless already acted */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(pi);
		else
			l2cap_ertm_send(sk);
	}
}
4209
/* Handle a Reject (REJ) S-frame: the peer requests retransmission of
 * everything from its ReqSeq onwards.  Frames before ReqSeq are
 * implicitly acknowledged and dropped from the tx queue first.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* REJ with F-bit: skip retransmission if we already
		 * reacted to an earlier REJ for this poll cycle */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		/* remember we acted, so the F-bit reply doesn't trigger
		 * a second retransmission */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * single frame the peer asks for (its ReqSeq), with P/F-bit
 * bookkeeping mirroring the REJ handler.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: avoid retransmitting a frame we already resent
		 * in response to the matching P-bit SREJ */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
4270
/* Handle a Receiver-Not-Ready (RNR) S-frame: mark the peer busy, stop
 * retransmitting, and answer a poll with RR+F (or the pending SREJ
 * tail when in SREJ recovery).
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* peer can't receive: no point retransmitting */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}
4297
/* Dispatch a received S-frame (supervisory frame) by its 2-bit
 * supervise field; all four possible values are covered.  The skb
 * carries no payload beyond the control field and is always consumed.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* F-bit answers our poll: stop the monitor timer */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
4331
/* Validate and dispatch one raw ERTM PDU: strip the control field,
 * verify FCS, check the payload length against MPS and the ReqSeq
 * against the send window, then route to the I-frame or S-frame
 * handler.  Always consumes the skb; returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* a SAR-start I-frame carries a 2-byte SDU length prefix */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: frame shorter than its headers */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames must carry no payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4401
/* Route an incoming frame on a connection-oriented channel (by CID)
 * according to the channel mode: basic, ERTM, or streaming.
 * NOTE(review): l2cap_get_chan_by_scid appears to return the socket
 * locked — the matching bh_unlock_sock is at done; confirm against its
 * definition.  Always consumes the skb; returns 0.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* if the socket is owned by a user context, defer to the
		 * backlog instead of processing in softirq */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* streaming mode carries only I-frames within MPS */
		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* streaming is lossy: just resynchronise the expected
		 * sequence number on a gap */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4489
/* Deliver a connectionless (G-frame) payload to the socket bound to
 * the given PSM on this link's source address.  Always consumes the
 * skb; returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk may be NULL when the PSM lookup failed */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
4519
/* Demultiplex a complete L2CAP frame by CID: signalling channel,
 * connectionless channel, or a connection-oriented data channel.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* lh points at the basic header; the bytes stay valid after
	 * skb_pull below (pull only advances the data pointer) */
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
4553
4554 /* ---- L2CAP interface with lower layer (HCI) ---- */
4555
/* HCI callback: an ACL connection is being requested from bdaddr.
 * Scan the listening L2CAP sockets and return the accept/role-switch
 * link-mode mask — preferring a socket bound to this exact adapter
 * address (lm1) over a wildcard-bound one (lm2).
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* exact match on the local adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
4588
/* HCI callback: the ACL connection attempt completed.  On success,
 * create/find the L2CAP connection object and kick off channel setup;
 * on failure, tear down any state for this link.
 */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	if (!status) {
		conn = l2cap_conn_add(hcon, status);
		if (conn)
			l2cap_conn_ready(conn);
	} else
		l2cap_conn_del(hcon, bt_err(status));

	return 0;
}
4607
/* HCI callback: return the reason code to use for the disconnect.
 * 0x13 is presumably the HCI "Remote User Terminated Connection"
 * error code, used as the default when no L2CAP state exists —
 * confirm against the HCI error code table.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4619
/* HCI callback: the ACL link went down; tear down all L2CAP state for
 * it, translating the HCI reason into an errno via bt_err().
 */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4631
/* React to an encryption change on a connected channel.  Loss of
 * encryption gives a MEDIUM-security channel a 5 second grace timer
 * and closes a HIGH-security channel immediately; re-encryption
 * cancels the grace timer.
 */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
{
	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
		return;

	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);
	} else {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
	}
}
4648
/* HCI callback: authentication/encryption completed on the link.
 * Walk every channel on the connection and advance its state machine:
 * established channels get an encryption check, channels waiting to
 * connect (BT_CONNECT) send their deferred Connection Request, and
 * incoming channels (BT_CONNECT2) answer with success or a
 * security-block response.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* a connect is already in flight for this channel */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* security is up: send the deferred
				 * Connection Request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				/* security failed: schedule quick teardown */
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4722
/* HCI callback: one ACL data packet arrived.  An L2CAP frame may be
 * split across several ACL packets: the start packet (no ACL_CONT
 * flag) carries the basic L2CAP header with the total length, and
 * continuation packets are accumulated in conn->rx_skb until
 * conn->rx_len reaches zero, at which point the complete frame is
 * handed to l2cap_recv_frame().  Malformed sequences discard partial
 * state and mark the connection unreliable.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* a new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* reject frames that would exceed the channel's MTU
		 * before allocating the reassembly buffer */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4827
/* seq_file show handler: print one line per L2CAP socket with its
 * addresses, state, PSM, CIDs, MTUs, and security level.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4850
/* debugfs open: bind the show routine via the seq_file single_open helper */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4855
/* debugfs file operations for the per-socket listing */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* dentry for the "l2cap" debugfs file, removed on module exit */
static struct dentry *l2cap_debugfs;

/* socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* address-family registration hook (socket(PF_BLUETOOTH, ...)) */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* HCI-layer protocol callbacks wiring L2CAP into the Bluetooth core */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4901
/* Module init: register the proto, create the local-busy workqueue,
 * register the socket family and the HCI protocol, and expose the
 * debugfs file.  Each failure unwinds everything set up before it.
 */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	/* single-threaded workqueue driving l2cap_busy_work */
	_busy_wq = create_singlethread_workqueue("l2cap");
	if (!_busy_wq) {
		proto_unregister(&l2cap_proto);
		return -ENOMEM;
	}

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	/* debugfs is optional: failure is logged, not fatal */
	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	destroy_workqueue(_busy_wq);
	proto_unregister(&l2cap_proto);
	return err;
}
4946
/* Module exit: tear down everything l2cap_init set up, flushing any
 * pending busy work before destroying the workqueue.
 */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4962
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4970
module_init(l2cap_init);
module_exit(l2cap_exit);

/* runtime-tunable knob to force basic mode instead of ERTM */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");