]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/bluetooth/l2cap.c
Bluetooth: Send ConfigReq after send a ConnectionRsp
[mirror_ubuntu-zesty-kernel.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
56 #define VERSION "2.14"
57
58 static int enable_ertm = 0;
59
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63 static const struct proto_ops l2cap_sock_ops;
64
65 static struct workqueue_struct *_busy_wq;
66
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 };
70
71 static void l2cap_busy_work(struct work_struct *work);
72
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
76
77 static int l2cap_build_conf_req(struct sock *sk, void *data);
78 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
79 u8 code, u8 ident, u16 dlen, void *data);
80
81 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
82
83 /* ---- L2CAP timers ---- */
/* Per-socket timer callback: the operation in flight took too long.
 * Runs in timer (softirq) context; sk was passed as the timer argument. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Pick the error reported to userspace based on the state the
	 * socket was in when the timer fired: expiry while connected,
	 * configuring, or while a post-SDP-security connect is pending
	 * reads as a refusal; everything else is a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must see the socket unlocked; sock_put()
	 * drops the reference held for the running timer. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
108
109 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 {
111 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
112 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
113 }
114
/* Cancel the per-socket timer, if pending. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
120
121 /* ---- L2CAP channels ---- */
122 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
123 {
124 struct sock *s;
125 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 if (l2cap_pi(s)->dcid == cid)
127 break;
128 }
129 return s;
130 }
131
132 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
133 {
134 struct sock *s;
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->scid == cid)
137 break;
138 }
139 return s;
140 }
141
142 /* Find channel with given SCID.
143 * Returns locked socket */
144 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 {
146 struct sock *s;
147 read_lock(&l->lock);
148 s = __l2cap_get_chan_by_scid(l, cid);
149 if (s)
150 bh_lock_sock(s);
151 read_unlock(&l->lock);
152 return s;
153 }
154
155 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
156 {
157 struct sock *s;
158 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
159 if (l2cap_pi(s)->ident == ident)
160 break;
161 }
162 return s;
163 }
164
165 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 {
167 struct sock *s;
168 read_lock(&l->lock);
169 s = __l2cap_get_chan_by_ident(l, ident);
170 if (s)
171 bh_lock_sock(s);
172 read_unlock(&l->lock);
173 return s;
174 }
175
176 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 {
178 u16 cid = L2CAP_CID_DYN_START;
179
180 for (; cid < L2CAP_CID_DYN_END; cid++) {
181 if (!__l2cap_get_chan_by_scid(l, cid))
182 return cid;
183 }
184
185 return 0;
186 }
187
188 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
189 {
190 sock_hold(sk);
191
192 if (l->head)
193 l2cap_pi(l->head)->prev_c = sk;
194
195 l2cap_pi(sk)->next_c = l->head;
196 l2cap_pi(sk)->prev_c = NULL;
197 l->head = sk;
198 }
199
/* Remove sk from the connection's doubly-linked channel list and drop
 * the reference taken by __l2cap_chan_link(). */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
216
/* Attach sk to conn as an L2CAP channel, assigning CIDs according to
 * the socket type, and queue it on the parent's accept list if this is
 * an incoming connection.  Caller must hold the channel-list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason — presumably HCI "remote user
	 * terminated connection"; TODO confirm against hci.h */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
248
249 /* Delete channel.
250 * Must be called on the locked socket. */
/* Delete channel.
 * Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	/* A not-yet-accepted child is taken off the parent's accept
	 * queue and the parent is woken; an established socket just
	 * gets a state-change notification. */
	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM mode keeps extra per-channel state: stop its timers and
	 * drop any queued SREJ/busy frames and pending SREJ requests. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);

		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));

		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
297
298 /* Service level security */
/* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type and ask the ACL link to enforce it.
 * Returns the result of hci_conn_security() (non-zero when the link
 * already satisfies the requirement). */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	/* PSM 0x0001 is SDP: it never bonds, and anything below
	 * BT_SECURITY_SDP is raised to it. */
	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
329
330 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 {
332 u8 id;
333
334 /* Get next available identificator.
335 * 1 - 128 are used by kernel.
336 * 129 - 199 are reserved.
337 * 200 - 254 are used by utilities like l2ping, etc.
338 */
339
340 spin_lock_bh(&conn->lock);
341
342 if (++conn->tx_ident > 128)
343 conn->tx_ident = 1;
344
345 id = conn->tx_ident;
346
347 spin_unlock_bh(&conn->lock);
348
349 return id;
350 }
351
352 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
353 {
354 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
355
356 BT_DBG("code 0x%2.2x", code);
357
358 if (!skb)
359 return;
360
361 hci_send_acl(conn->hcon, skb, 0);
362 }
363
/* Build and transmit an ERTM supervisory (S-)frame carrying 'control'.
 * Folds in the pending Final/Poll bits and the optional CRC16 FCS.
 * No-op unless the channel is connected. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	/* S-frame = basic header + 2-byte control field */
	int count, hlen = L2CAP_HDR_SIZE + 2;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume any deferred F-bit / P-bit exactly once. */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers the frame up to (but excluding) the FCS field itself. */
	if (pi->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
409
410 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
411 {
412 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
413 control |= L2CAP_SUPER_RCV_NOT_READY;
414 pi->conn_state |= L2CAP_CONN_RNR_SENT;
415 } else
416 control |= L2CAP_SUPER_RCV_READY;
417
418 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
419
420 l2cap_send_sframe(pi, control);
421 }
422
423 static inline int __l2cap_no_conn_pending(struct sock *sk)
424 {
425 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
426 }
427
/* Kick off channel establishment: once the peer's feature mask is known
 * (and security passes), send a Connection Request; otherwise first
 * issue an Information Request for the feature mask and arm its timer. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in progress; wait for it to finish
		 * (l2cap_conn_start() will retry the pending channels). */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
461
462 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
463 {
464 u32 local_feat_mask = l2cap_feat_mask;
465 if (enable_ertm)
466 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
467
468 switch (mode) {
469 case L2CAP_MODE_ERTM:
470 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
471 case L2CAP_MODE_STREAMING:
472 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
473 default:
474 return 0x00;
475 }
476 }
477
/* Send a Disconnection Request for sk's channel, flushing the transmit
 * queue and stopping the ERTM timers first.  Leaves the socket in
 * BT_DISCONN with sk_err set to 'err'. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
501
502 /* ---- L2CAP connections ---- */
503 static void l2cap_conn_start(struct l2cap_conn *conn)
504 {
505 struct l2cap_chan_list *l = &conn->chan_list;
506 struct sock_del_list del, *tmp1, *tmp2;
507 struct sock *sk;
508
509 BT_DBG("conn %p", conn);
510
511 INIT_LIST_HEAD(&del.list);
512
513 read_lock(&l->lock);
514
515 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
516 bh_lock_sock(sk);
517
518 if (sk->sk_type != SOCK_SEQPACKET &&
519 sk->sk_type != SOCK_STREAM) {
520 bh_unlock_sock(sk);
521 continue;
522 }
523
524 if (sk->sk_state == BT_CONNECT) {
525 if (l2cap_check_security(sk) &&
526 __l2cap_no_conn_pending(sk)) {
527 struct l2cap_conn_req req;
528
529 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
530 conn->feat_mask)
531 && l2cap_pi(sk)->conf_state &
532 L2CAP_CONF_STATE2_DEVICE) {
533 tmp1 = kzalloc(sizeof(struct srej_list),
534 GFP_ATOMIC);
535 tmp1->sk = sk;
536 list_add_tail(&tmp1->list, &del.list);
537 bh_unlock_sock(sk);
538 continue;
539 }
540
541 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
542 req.psm = l2cap_pi(sk)->psm;
543
544 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
545 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
546
547 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
548 L2CAP_CONN_REQ, sizeof(req), &req);
549 }
550 } else if (sk->sk_state == BT_CONNECT2) {
551 struct l2cap_conn_rsp rsp;
552 char buf[128];
553 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
554 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
555
556 if (l2cap_check_security(sk)) {
557 if (bt_sk(sk)->defer_setup) {
558 struct sock *parent = bt_sk(sk)->parent;
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
561 parent->sk_data_ready(parent, 0);
562
563 } else {
564 sk->sk_state = BT_CONFIG;
565 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
566 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
567 }
568 } else {
569 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
570 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 }
572
573 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
574 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
575
576 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
577 rsp.result != L2CAP_CR_SUCCESS) {
578 bh_unlock_sock(sk);
579 continue;
580 }
581
582 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
583 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
584 l2cap_build_conf_req(sk, buf), buf);
585 l2cap_pi(sk)->num_conf_req++;
586 }
587
588 bh_unlock_sock(sk);
589 }
590
591 read_unlock(&l->lock);
592
593 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
594 bh_lock_sock(tmp1->sk);
595 __l2cap_sock_close(tmp1->sk, ECONNRESET);
596 bh_unlock_sock(tmp1->sk);
597 list_del(&tmp1->list);
598 kfree(tmp1);
599 }
600 }
601
/* Called when the underlying ACL link comes up: raw/datagram channels
 * are connected immediately, connection-oriented channels that were
 * waiting in BT_CONNECT start the L2CAP handshake. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* No channel-level handshake needed. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
627
628 /* Notify sockets that we cannot guaranty reliability anymore */
629 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
630 {
631 struct l2cap_chan_list *l = &conn->chan_list;
632 struct sock *sk;
633
634 BT_DBG("conn %p", conn);
635
636 read_lock(&l->lock);
637
638 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
639 if (l2cap_pi(sk)->force_reliable)
640 sk->sk_err = err;
641 }
642
643 read_unlock(&l->lock);
644 }
645
646 static void l2cap_info_timeout(unsigned long arg)
647 {
648 struct l2cap_conn *conn = (void *) arg;
649
650 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
651 conn->info_ident = 0;
652
653 l2cap_conn_start(conn);
654 }
655
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Returns NULL on allocation failure or when 'status' reports a
 * failed link with no existing connection. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	/* May run in softirq context, hence GFP_ATOMIC. */
	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* Timer for the feature-mask Information Request (see
	 * l2cap_info_timeout). */
	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: default disconnect reason — presumably HCI "remote user
	 * terminated connection"; TODO confirm against hci.h */
	conn->disc_reason = 0x13;

	return conn;
}
688
/* Tear down the L2CAP connection attached to an ACL link: kill every
 * channel with 'err', stop the info timer, and free the object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
715
716 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
717 {
718 struct l2cap_chan_list *l = &conn->chan_list;
719 write_lock_bh(&l->lock);
720 __l2cap_chan_add(conn, sk, parent);
721 write_unlock_bh(&l->lock);
722 }
723
724 /* ---- Socket interface ---- */
725 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
726 {
727 struct sock *sk;
728 struct hlist_node *node;
729 sk_for_each(sk, node, &l2cap_sk_list.head)
730 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
731 goto found;
732 sk = NULL;
733 found:
734 return sk;
735 }
736
737 /* Find socket with psm and source bdaddr.
738 * Returns closest match.
739 */
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke early, i.e. on an
	 * exact match; otherwise fall back to the BDADDR_ANY candidate. */
	return node ? sk : sk1;
}
761
762 /* Find socket with given address (psm, src).
763 * Returns locked socket */
764 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
765 {
766 struct sock *s;
767 read_lock(&l2cap_sk_list.lock);
768 s = __l2cap_get_sock_by_psm(state, psm, src);
769 if (s)
770 bh_lock_sock(s);
771 read_unlock(&l2cap_sk_list.lock);
772 return s;
773 }
774
/* Socket destructor: drop anything still queued for receive/transmit. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
782
783 static void l2cap_sock_cleanup_listen(struct sock *parent)
784 {
785 struct sock *sk;
786
787 BT_DBG("parent %p", parent);
788
789 /* Close not yet accepted channels */
790 while ((sk = bt_accept_dequeue(parent, NULL)))
791 l2cap_sock_close(sk);
792
793 parent->sk_state = BT_CLOSED;
794 sock_set_flag(parent, SOCK_ZAPPED);
795 }
796
797 /* Kill socket (only if zapped and orphan)
798 * Must be called on unlocked socket.
799 */
800 static void l2cap_sock_kill(struct sock *sk)
801 {
802 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
803 return;
804
805 BT_DBG("sk %p state %d", sk, sk->sk_state);
806
807 /* Kill poor orphan */
808 bt_sock_unlink(&l2cap_sk_list, sk);
809 sock_set_flag(sk, SOCK_DEAD);
810 sock_put(sk);
811 }
812
/* Close the socket according to its current state.  Caller holds the
 * socket lock; 'reason' becomes the error surfaced to userspace. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established connection-oriented channels need the full
		 * L2CAP disconnect handshake; everything else is local. */
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk, reason);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		/* An unanswered incoming request gets a negative
		 * Connection Response before we go away. */
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
866
867 /* Must be called on unlocked socket. */
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
	/* Stop the timer before closing; kill only after the lock is
	 * released (l2cap_sock_kill requires an unlocked socket). */
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
876
/* Initialise L2CAP-specific socket state.  A child of an accepting
 * socket inherits the parent's configuration; a fresh socket gets the
 * module defaults. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM implies ERTM when the module allows it;
		 * STATE2_DEVICE marks the mode as locally mandatory. */
		if (enable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
922
/* Protocol descriptor used by sk_alloc(); obj_size makes the allocator
 * reserve room for the l2cap_pinfo that wraps struct sock. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
928
/* Allocate and initialise a new L2CAP socket, link it into the global
 * socket list, and arm its timeout handler.  Returns NULL on OOM. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* sk_sndtimeo doubles as the L2CAP connection timeout. */
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
953
954 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
955 int kern)
956 {
957 struct sock *sk;
958
959 BT_DBG("sock %p", sock);
960
961 sock->state = SS_UNCONNECTED;
962
963 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
964 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
965 return -ESOCKTNOSUPPORT;
966
967 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
968 return -EPERM;
969
970 sock->ops = &l2cap_sock_ops;
971
972 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
973 if (!sk)
974 return -ENOMEM;
975
976 l2cap_sock_init(sk, NULL);
977 return 0;
978 }
979
/* bind(2): attach a source bdaddr and PSM to an unbound socket.
 * Rejects CID binds, privileged PSMs without CAP_NET_BIND_SERVICE, and
 * (addr, psm) pairs already taken by another socket. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs leave the rest zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	/* PSMs below 0x1001 are reserved/well-known: privileged. */
	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* SDP (0x0001) and RFCOMM (0x0003) need no security. */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
1033
/* Establish the outgoing path for sk: find a route to the destination,
 * bring up (or reuse) the ACL link with the right authentication type,
 * attach the channel, and either finish immediately (link already up)
 * or start the L2CAP handshake.  Returns 0 or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	/* Map socket type / PSM / security level to an HCI auth type:
	 * raw sockets may dedicated-bond, SDP (0x0001) never bonds,
	 * everything else general-bonds as its level requires.
	 * (Mirrors l2cap_check_security().) */
	if (sk->sk_type == SOCK_RAW) {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* ACL link already up: raw/dgram channels are done,
		 * connection-oriented ones start the L2CAP handshake. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1124
/* connect(2): validate the destination address and channel mode, start
 * the connection if the socket is in a connectable state, then wait
 * (subject to O_NONBLOCK) for BT_CONNECTED. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Copy at most sizeof(la); shorter sockaddrs leave the rest zeroed. */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM. */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming are only usable when the module enables them. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1202
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode
 * may listen.  A socket bound without a PSM is auto-assigned the first
 * free odd PSM in the dynamic range. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	/* ERTM/streaming are only usable when the module enables them. */
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Dynamic PSMs are odd values in 0x1001..0x10ff. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1261
/*
 * accept() handler: block (subject to O_NONBLOCK / rcvtimeo) until
 * bt_accept_dequeue() yields an established child socket, then mark
 * the new socket SS_CONNECTED.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so a child can be queued */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* The listener may have been closed while we slept */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1317
1318 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1319 {
1320 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1321 struct sock *sk = sock->sk;
1322
1323 BT_DBG("sock %p, sk %p", sock, sk);
1324
1325 addr->sa_family = AF_BLUETOOTH;
1326 *len = sizeof(struct sockaddr_l2);
1327
1328 if (peer) {
1329 la->l2_psm = l2cap_pi(sk)->psm;
1330 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1331 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1332 } else {
1333 la->l2_psm = l2cap_pi(sk)->sport;
1334 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1335 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1336 }
1337
1338 return 0;
1339 }
1340
/*
 * Sleep in HZ/5 slices until all outstanding ERTM I-frames are acked
 * or the connection goes away.  Interruptible: returns a
 * sock_intr_errno() code on signal, or a pending socket error.
 * Called with the socket locked; drops and reacquires the lock.
 */
static int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the slice if the previous one fully elapsed */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1371
/*
 * ERTM monitor timer callback (softirq context).  Fires while waiting
 * for the peer's response to a poll: once retry_count reaches
 * remote_max_tx the channel is disconnected, otherwise the timer is
 * re-armed and another RR/RNR with the P bit is sent.
 *
 * NOTE: __mod_monitor_timer() is a macro that implicitly uses the
 * local variable 'sk'.
 */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	l2cap_pi(sk)->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1391
/*
 * ERTM retransmission timer callback (softirq context).  No ack
 * arrived in time: start the monitor cycle (retry_count = 1), flag
 * that we are waiting for an F-bit response, and poll the peer.
 *
 * NOTE: __mod_monitor_timer() is a macro that implicitly uses the
 * local variable 'sk'.
 */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	BT_DBG("sk %p", sk);

	bh_lock_sock(sk);
	l2cap_pi(sk)->retry_count = 1;
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1407
/*
 * Free acknowledged frames from the head of the TX queue, stopping at
 * the first frame whose tx_seq equals expected_ack_seq (still
 * outstanding).  Once nothing is left unacked the retransmission
 * timer is stopped.
 */
static void l2cap_drop_acked_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(TX_QUEUE(sk))) &&
			l2cap_pi(sk)->unacked_frames) {
		/* Head frame is the oldest still awaiting its ack */
		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
			break;

		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);

		l2cap_pi(sk)->unacked_frames--;
	}

	if (!l2cap_pi(sk)->unacked_frames)
		del_timer(&l2cap_pi(sk)->retrans_timer);
}
1426
1427 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1428 {
1429 struct l2cap_pinfo *pi = l2cap_pi(sk);
1430
1431 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1432
1433 hci_send_acl(pi->conn->hcon, skb, 0);
1434 }
1435
1436 static void l2cap_streaming_send(struct sock *sk)
1437 {
1438 struct sk_buff *skb, *tx_skb;
1439 struct l2cap_pinfo *pi = l2cap_pi(sk);
1440 u16 control, fcs;
1441
1442 while ((skb = sk->sk_send_head)) {
1443 tx_skb = skb_clone(skb, GFP_ATOMIC);
1444
1445 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1446 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1447 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1448
1449 if (pi->fcs == L2CAP_FCS_CRC16) {
1450 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1451 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1452 }
1453
1454 l2cap_do_send(sk, tx_skb);
1455
1456 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1457
1458 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1459 sk->sk_send_head = NULL;
1460 else
1461 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1462
1463 skb = skb_dequeue(TX_QUEUE(sk));
1464 kfree_skb(skb);
1465 }
1466 }
1467
1468 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1469 {
1470 struct l2cap_pinfo *pi = l2cap_pi(sk);
1471 struct sk_buff *skb, *tx_skb;
1472 u16 control, fcs;
1473
1474 skb = skb_peek(TX_QUEUE(sk));
1475 if (!skb)
1476 return;
1477
1478 do {
1479 if (bt_cb(skb)->tx_seq == tx_seq)
1480 break;
1481
1482 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1483 return;
1484
1485 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1486
1487 if (pi->remote_max_tx &&
1488 bt_cb(skb)->retries == pi->remote_max_tx) {
1489 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1490 return;
1491 }
1492
1493 tx_skb = skb_clone(skb, GFP_ATOMIC);
1494 bt_cb(skb)->retries++;
1495 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1496
1497 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1498 control |= L2CAP_CTRL_FINAL;
1499 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1500 }
1501
1502 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1503 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1504
1505 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1506
1507 if (pi->fcs == L2CAP_FCS_CRC16) {
1508 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1509 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1510 }
1511
1512 l2cap_do_send(sk, tx_skb);
1513 }
1514
1515 static int l2cap_ertm_send(struct sock *sk)
1516 {
1517 struct sk_buff *skb, *tx_skb;
1518 struct l2cap_pinfo *pi = l2cap_pi(sk);
1519 u16 control, fcs;
1520 int nsent = 0;
1521
1522 if (sk->sk_state != BT_CONNECTED)
1523 return -ENOTCONN;
1524
1525 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1526
1527 if (pi->remote_max_tx &&
1528 bt_cb(skb)->retries == pi->remote_max_tx) {
1529 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1530 break;
1531 }
1532
1533 tx_skb = skb_clone(skb, GFP_ATOMIC);
1534
1535 bt_cb(skb)->retries++;
1536
1537 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1538 control &= L2CAP_CTRL_SAR;
1539
1540 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1541 control |= L2CAP_CTRL_FINAL;
1542 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1543 }
1544 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1545 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1546 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1547
1548
1549 if (pi->fcs == L2CAP_FCS_CRC16) {
1550 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1551 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1552 }
1553
1554 l2cap_do_send(sk, tx_skb);
1555
1556 __mod_retrans_timer();
1557
1558 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1559 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1560
1561 pi->unacked_frames++;
1562 pi->frames_sent++;
1563
1564 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1565 sk->sk_send_head = NULL;
1566 else
1567 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1568
1569 nsent++;
1570 }
1571
1572 return nsent;
1573 }
1574
1575 static int l2cap_retransmit_frames(struct sock *sk)
1576 {
1577 struct l2cap_pinfo *pi = l2cap_pi(sk);
1578 int ret;
1579
1580 if (!skb_queue_empty(TX_QUEUE(sk)))
1581 sk->sk_send_head = TX_QUEUE(sk)->next;
1582
1583 pi->next_tx_seq = pi->expected_ack_seq;
1584 ret = l2cap_ertm_send(sk);
1585 return ret;
1586 }
1587
/*
 * Acknowledge received I-frames.  While locally busy an RNR S-frame is
 * sent (and RNR_SENT recorded); otherwise the ack is piggy-backed on
 * pending I-frames via l2cap_ertm_send(), falling back to an RR
 * S-frame when nothing was sent.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	/* Outgoing I-frames carry ReqSeq, which acknowledges implicitly */
	if (l2cap_ertm_send(sk) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1608
1609 static void l2cap_send_srejtail(struct sock *sk)
1610 {
1611 struct srej_list *tail;
1612 u16 control;
1613
1614 control = L2CAP_SUPER_SELECT_REJECT;
1615 control |= L2CAP_CTRL_FINAL;
1616
1617 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1618 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1619
1620 l2cap_send_sframe(l2cap_pi(sk), control);
1621 }
1622
/*
 * Copy 'len' bytes of user data from msg into skb: the first 'count'
 * bytes go into skb itself, the rest into MTU-sized continuation
 * fragments chained on skb's frag_list.  Returns bytes copied or a
 * negative error; on error the caller frees skb, which also releases
 * any fragments already attached.
 *
 * NOTE(review): an allocation failure returns -EFAULT rather than
 * -ENOMEM (or the err filled in by bt_skb_send_alloc) — confirm this
 * is intentional.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return -EFAULT;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1654
/*
 * Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a 2-byte
 * PSM, followed by the user data (spilled into fragments by
 * l2cap_skbuff_fromiovec when above the link MTU).  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1683
/*
 * Build a basic-mode B-frame: a plain L2CAP header followed by the
 * user data (spilled into fragments by l2cap_skbuff_fromiovec when
 * above the link MTU).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1711
/*
 * Build an ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * an optional 16-bit SDU length (present only on start-of-SDU frames,
 * i.e. when sdulen != 0) and, when CRC16 is enabled, a 2-byte FCS
 * placeholder that is stamped at transmit time.  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;

	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real checksum is written at send time */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1756
/*
 * Segment an SDU larger than the remote MPS into a START / CONTINUE* /
 * END sequence of I-frames.  Frames are built on a private queue so a
 * mid-SDU allocation failure leaves TX_QUEUE untouched, then spliced
 * onto the TX queue as one unit.  Returns total bytes queued or a
 * negative error.
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The START frame carries the total SDU length as sdulen */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}
1802
1803 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1804 {
1805 struct sock *sk = sock->sk;
1806 struct l2cap_pinfo *pi = l2cap_pi(sk);
1807 struct sk_buff *skb;
1808 u16 control;
1809 int err;
1810
1811 BT_DBG("sock %p, sk %p", sock, sk);
1812
1813 err = sock_error(sk);
1814 if (err)
1815 return err;
1816
1817 if (msg->msg_flags & MSG_OOB)
1818 return -EOPNOTSUPP;
1819
1820 lock_sock(sk);
1821
1822 if (sk->sk_state != BT_CONNECTED) {
1823 err = -ENOTCONN;
1824 goto done;
1825 }
1826
1827 /* Connectionless channel */
1828 if (sk->sk_type == SOCK_DGRAM) {
1829 skb = l2cap_create_connless_pdu(sk, msg, len);
1830 if (IS_ERR(skb)) {
1831 err = PTR_ERR(skb);
1832 } else {
1833 l2cap_do_send(sk, skb);
1834 err = len;
1835 }
1836 goto done;
1837 }
1838
1839 switch (pi->mode) {
1840 case L2CAP_MODE_BASIC:
1841 /* Check outgoing MTU */
1842 if (len > pi->omtu) {
1843 err = -EMSGSIZE;
1844 goto done;
1845 }
1846
1847 /* Create a basic PDU */
1848 skb = l2cap_create_basic_pdu(sk, msg, len);
1849 if (IS_ERR(skb)) {
1850 err = PTR_ERR(skb);
1851 goto done;
1852 }
1853
1854 l2cap_do_send(sk, skb);
1855 err = len;
1856 break;
1857
1858 case L2CAP_MODE_ERTM:
1859 case L2CAP_MODE_STREAMING:
1860 /* Entire SDU fits into one PDU */
1861 if (len <= pi->remote_mps) {
1862 control = L2CAP_SDU_UNSEGMENTED;
1863 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1864 if (IS_ERR(skb)) {
1865 err = PTR_ERR(skb);
1866 goto done;
1867 }
1868 __skb_queue_tail(TX_QUEUE(sk), skb);
1869
1870 if (sk->sk_send_head == NULL)
1871 sk->sk_send_head = skb;
1872
1873 } else {
1874 /* Segment SDU into multiples PDUs */
1875 err = l2cap_sar_segment_sdu(sk, msg, len);
1876 if (err < 0)
1877 goto done;
1878 }
1879
1880 if (pi->mode == L2CAP_MODE_STREAMING) {
1881 l2cap_streaming_send(sk);
1882 } else {
1883 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1884 pi->conn_state && L2CAP_CONN_WAIT_F) {
1885 err = len;
1886 break;
1887 }
1888 err = l2cap_ertm_send(sk);
1889 }
1890
1891 if (err >= 0)
1892 err = len;
1893 break;
1894
1895 default:
1896 BT_DBG("bad state %1.1x", pi->mode);
1897 err = -EBADFD;
1898 }
1899
1900 done:
1901 release_sock(sk);
1902 return err;
1903 }
1904
/*
 * recvmsg() handler.  For a deferred-setup channel still in
 * BT_CONNECT2, the first read completes the connection instead of
 * returning data: the pending ConnectionRsp (success) is sent and,
 * unless one is already in flight, the first ConfigReq.  All other
 * states fall through to the generic bt_sock_recvmsg().
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		/* A ConfigReq was already sent; nothing more to do */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
1943
/*
 * SOL_L2CAP setsockopt handler (legacy options).
 *
 * NOTE(review): when L2CAP_OPTIONS carries an unsupported mode, err is
 * set to -EINVAL but the remaining fields (imtu, omtu, fcs, max_tx,
 * tx_win) are still applied before returning — confirm this partial
 * application is intended.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Pre-load current values so a short user copy keeps
		 * the existing settings for the untouched fields. */
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* The strongest requested link-mode flag wins */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2023
2024 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2025 {
2026 struct sock *sk = sock->sk;
2027 struct bt_security sec;
2028 int len, err = 0;
2029 u32 opt;
2030
2031 BT_DBG("sk %p", sk);
2032
2033 if (level == SOL_L2CAP)
2034 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2035
2036 if (level != SOL_BLUETOOTH)
2037 return -ENOPROTOOPT;
2038
2039 lock_sock(sk);
2040
2041 switch (optname) {
2042 case BT_SECURITY:
2043 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2044 && sk->sk_type != SOCK_RAW) {
2045 err = -EINVAL;
2046 break;
2047 }
2048
2049 sec.level = BT_SECURITY_LOW;
2050
2051 len = min_t(unsigned int, sizeof(sec), optlen);
2052 if (copy_from_user((char *) &sec, optval, len)) {
2053 err = -EFAULT;
2054 break;
2055 }
2056
2057 if (sec.level < BT_SECURITY_LOW ||
2058 sec.level > BT_SECURITY_HIGH) {
2059 err = -EINVAL;
2060 break;
2061 }
2062
2063 l2cap_pi(sk)->sec_level = sec.level;
2064 break;
2065
2066 case BT_DEFER_SETUP:
2067 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2068 err = -EINVAL;
2069 break;
2070 }
2071
2072 if (get_user(opt, (u32 __user *) optval)) {
2073 err = -EFAULT;
2074 break;
2075 }
2076
2077 bt_sk(sk)->defer_setup = opt;
2078 break;
2079
2080 default:
2081 err = -ENOPROTOOPT;
2082 break;
2083 }
2084
2085 release_sock(sk);
2086 return err;
2087 }
2088
2089 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2090 {
2091 struct sock *sk = sock->sk;
2092 struct l2cap_options opts;
2093 struct l2cap_conninfo cinfo;
2094 int len, err = 0;
2095 u32 opt;
2096
2097 BT_DBG("sk %p", sk);
2098
2099 if (get_user(len, optlen))
2100 return -EFAULT;
2101
2102 lock_sock(sk);
2103
2104 switch (optname) {
2105 case L2CAP_OPTIONS:
2106 opts.imtu = l2cap_pi(sk)->imtu;
2107 opts.omtu = l2cap_pi(sk)->omtu;
2108 opts.flush_to = l2cap_pi(sk)->flush_to;
2109 opts.mode = l2cap_pi(sk)->mode;
2110 opts.fcs = l2cap_pi(sk)->fcs;
2111 opts.max_tx = l2cap_pi(sk)->max_tx;
2112 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2113
2114 len = min_t(unsigned int, len, sizeof(opts));
2115 if (copy_to_user(optval, (char *) &opts, len))
2116 err = -EFAULT;
2117
2118 break;
2119
2120 case L2CAP_LM:
2121 switch (l2cap_pi(sk)->sec_level) {
2122 case BT_SECURITY_LOW:
2123 opt = L2CAP_LM_AUTH;
2124 break;
2125 case BT_SECURITY_MEDIUM:
2126 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2127 break;
2128 case BT_SECURITY_HIGH:
2129 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2130 L2CAP_LM_SECURE;
2131 break;
2132 default:
2133 opt = 0;
2134 break;
2135 }
2136
2137 if (l2cap_pi(sk)->role_switch)
2138 opt |= L2CAP_LM_MASTER;
2139
2140 if (l2cap_pi(sk)->force_reliable)
2141 opt |= L2CAP_LM_RELIABLE;
2142
2143 if (put_user(opt, (u32 __user *) optval))
2144 err = -EFAULT;
2145 break;
2146
2147 case L2CAP_CONNINFO:
2148 if (sk->sk_state != BT_CONNECTED &&
2149 !(sk->sk_state == BT_CONNECT2 &&
2150 bt_sk(sk)->defer_setup)) {
2151 err = -ENOTCONN;
2152 break;
2153 }
2154
2155 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2156 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2157
2158 len = min_t(unsigned int, len, sizeof(cinfo));
2159 if (copy_to_user(optval, (char *) &cinfo, len))
2160 err = -EFAULT;
2161
2162 break;
2163
2164 default:
2165 err = -ENOPROTOOPT;
2166 break;
2167 }
2168
2169 release_sock(sk);
2170 return err;
2171 }
2172
2173 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2174 {
2175 struct sock *sk = sock->sk;
2176 struct bt_security sec;
2177 int len, err = 0;
2178
2179 BT_DBG("sk %p", sk);
2180
2181 if (level == SOL_L2CAP)
2182 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2183
2184 if (level != SOL_BLUETOOTH)
2185 return -ENOPROTOOPT;
2186
2187 if (get_user(len, optlen))
2188 return -EFAULT;
2189
2190 lock_sock(sk);
2191
2192 switch (optname) {
2193 case BT_SECURITY:
2194 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2195 && sk->sk_type != SOCK_RAW) {
2196 err = -EINVAL;
2197 break;
2198 }
2199
2200 sec.level = l2cap_pi(sk)->sec_level;
2201
2202 len = min_t(unsigned int, len, sizeof(sec));
2203 if (copy_to_user(optval, (char *) &sec, len))
2204 err = -EFAULT;
2205
2206 break;
2207
2208 case BT_DEFER_SETUP:
2209 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2210 err = -EINVAL;
2211 break;
2212 }
2213
2214 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2215 err = -EFAULT;
2216
2217 break;
2218
2219 default:
2220 err = -ENOPROTOOPT;
2221 break;
2222 }
2223
2224 release_sock(sk);
2225 return err;
2226 }
2227
/*
 * shutdown() handler.  For ERTM channels first wait for outstanding
 * I-frames to be acknowledged, then close the channel; with SO_LINGER
 * set, additionally wait (up to sk_lingertime) for BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	/* Surface a pending socket error if nothing else failed */
	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}
2258
2259 static int l2cap_sock_release(struct socket *sock)
2260 {
2261 struct sock *sk = sock->sk;
2262 int err;
2263
2264 BT_DBG("sock %p, sk %p", sock, sk);
2265
2266 if (!sk)
2267 return 0;
2268
2269 err = l2cap_sock_shutdown(sock, 2);
2270
2271 sock_orphan(sk);
2272 l2cap_sock_kill(sk);
2273 return err;
2274 }
2275
2276 static void l2cap_chan_ready(struct sock *sk)
2277 {
2278 struct sock *parent = bt_sk(sk)->parent;
2279
2280 BT_DBG("sk %p, parent %p", sk, parent);
2281
2282 l2cap_pi(sk)->conf_state = 0;
2283 l2cap_sock_clear_timer(sk);
2284
2285 if (!parent) {
2286 /* Outgoing channel.
2287 * Wake up socket sleeping on connect.
2288 */
2289 sk->sk_state = BT_CONNECTED;
2290 sk->sk_state_change(sk);
2291 } else {
2292 /* Incoming channel.
2293 * Wake up socket sleeping on accept.
2294 */
2295 parent->sk_data_ready(parent, 0);
2296 }
2297 }
2298
2299 /* Copy frame to all raw sockets on that connection */
/*
 * Deliver a clone of skb to every SOCK_RAW socket on this connection,
 * except the socket the frame originated from.  Clone or queue
 * failures just skip that listener.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
2325
2326 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signalling PDU: L2CAP header (CID_SIGNALING) +
 * command header + payload.  Payload beyond the link MTU is carried in
 * continuation fragments chained on frag_list.  Returns NULL on
 * allocation failure (any partial skb is freed).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fit as much payload as the first skb allows */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
2385
/*
 * Parse one configuration option at *ptr, advancing *ptr past it.
 * Values of encoded size 1/2/4 are returned by value (little-endian
 * converted); any other size returns a pointer to the raw bytes in
 * *val.  Returns the total encoded length of the option.
 *
 * NOTE(review): opt->len comes from the peer's packet and is not
 * validated against the remaining buffer here — callers must bound
 * the walk.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2418
/*
 * Append one configuration option at *ptr, advancing *ptr past it.
 * Sizes 1/2/4 store 'val' inline (little-endian); for any other size
 * 'val' is treated as a pointer to 'len' raw bytes to copy.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2448
/*
 * Delayed-ack timer callback (softirq context): acknowledge received
 * I-frames now.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2457
/*
 * Initialise ERTM state for a freshly configured channel: zero the
 * sequence counters, set up the retransmission/monitor/ack timers and
 * the SREJ/busy queues, and switch the backlog receive handler to the
 * ERTM data path.
 */
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
2480
2481 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2482 {
2483 switch (mode) {
2484 case L2CAP_MODE_STREAMING:
2485 case L2CAP_MODE_ERTM:
2486 if (l2cap_mode_supported(mode, remote_feat_mask))
2487 return mode;
2488 /* fall through */
2489 default:
2490 return L2CAP_MODE_BASIC;
2491 }
2492 }
2493
/* Build a Configuration Request for the channel behind @sk into @data
 * and return its total length.
 *
 * On the very first exchange (no request or response sent yet) the
 * channel mode may still be downgraded via l2cap_select_mode() unless
 * the mode is pinned by the device (L2CAP_CONF_STATE2_DEVICE).  An RFC
 * option is always appended; an FCS option only when the peer supports
 * the FCS feature and we want FCS off.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode selection happens only once, before anything went out. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode required by the device: never downgrade it. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* MTU option only when it differs from the spec default. */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so a PDU plus 10 bytes of L2CAP
		 * header/control/FCS overhead fits one ACL frame. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		/* FCS option is only meaningful if the peer supports
		 * the FCS feature at all. */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2586
2587 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2588 {
2589 struct l2cap_pinfo *pi = l2cap_pi(sk);
2590 struct l2cap_conf_rsp *rsp = data;
2591 void *ptr = rsp->data;
2592 void *req = pi->conf_req;
2593 int len = pi->conf_len;
2594 int type, hint, olen;
2595 unsigned long val;
2596 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2597 u16 mtu = L2CAP_DEFAULT_MTU;
2598 u16 result = L2CAP_CONF_SUCCESS;
2599
2600 BT_DBG("sk %p", sk);
2601
2602 while (len >= L2CAP_CONF_OPT_SIZE) {
2603 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2604
2605 hint = type & L2CAP_CONF_HINT;
2606 type &= L2CAP_CONF_MASK;
2607
2608 switch (type) {
2609 case L2CAP_CONF_MTU:
2610 mtu = val;
2611 break;
2612
2613 case L2CAP_CONF_FLUSH_TO:
2614 pi->flush_to = val;
2615 break;
2616
2617 case L2CAP_CONF_QOS:
2618 break;
2619
2620 case L2CAP_CONF_RFC:
2621 if (olen == sizeof(rfc))
2622 memcpy(&rfc, (void *) val, olen);
2623 break;
2624
2625 case L2CAP_CONF_FCS:
2626 if (val == L2CAP_FCS_NONE)
2627 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2628
2629 break;
2630
2631 default:
2632 if (hint)
2633 break;
2634
2635 result = L2CAP_CONF_UNKNOWN;
2636 *((u8 *) ptr++) = type;
2637 break;
2638 }
2639 }
2640
2641 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2642 goto done;
2643
2644 switch (pi->mode) {
2645 case L2CAP_MODE_STREAMING:
2646 case L2CAP_MODE_ERTM:
2647 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2648 pi->mode = l2cap_select_mode(rfc.mode,
2649 pi->conn->feat_mask);
2650 break;
2651 }
2652
2653 if (pi->mode != rfc.mode)
2654 return -ECONNREFUSED;
2655
2656 break;
2657 }
2658
2659 done:
2660 if (pi->mode != rfc.mode) {
2661 result = L2CAP_CONF_UNACCEPT;
2662 rfc.mode = pi->mode;
2663
2664 if (pi->num_conf_rsp == 1)
2665 return -ECONNREFUSED;
2666
2667 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2668 sizeof(rfc), (unsigned long) &rfc);
2669 }
2670
2671
2672 if (result == L2CAP_CONF_SUCCESS) {
2673 /* Configure output options and let the other side know
2674 * which ones we don't like. */
2675
2676 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2677 result = L2CAP_CONF_UNACCEPT;
2678 else {
2679 pi->omtu = mtu;
2680 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2681 }
2682 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2683
2684 switch (rfc.mode) {
2685 case L2CAP_MODE_BASIC:
2686 pi->fcs = L2CAP_FCS_NONE;
2687 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2688 break;
2689
2690 case L2CAP_MODE_ERTM:
2691 pi->remote_tx_win = rfc.txwin_size;
2692 pi->remote_max_tx = rfc.max_transmit;
2693 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2694 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2695
2696 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2697
2698 rfc.retrans_timeout =
2699 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2700 rfc.monitor_timeout =
2701 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2702
2703 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2704
2705 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2706 sizeof(rfc), (unsigned long) &rfc);
2707
2708 break;
2709
2710 case L2CAP_MODE_STREAMING:
2711 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2712 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2713
2714 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2715
2716 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2717
2718 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2719 sizeof(rfc), (unsigned long) &rfc);
2720
2721 break;
2722
2723 default:
2724 result = L2CAP_CONF_UNACCEPT;
2725
2726 memset(&rfc, 0, sizeof(rfc));
2727 rfc.mode = pi->mode;
2728 }
2729
2730 if (result == L2CAP_CONF_SUCCESS)
2731 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2732 }
2733 rsp->scid = cpu_to_le16(pi->dcid);
2734 rsp->result = cpu_to_le16(result);
2735 rsp->flags = cpu_to_le16(0x0000);
2736
2737 return ptr - data;
2738 }
2739
2740 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2741 {
2742 struct l2cap_pinfo *pi = l2cap_pi(sk);
2743 struct l2cap_conf_req *req = data;
2744 void *ptr = req->data;
2745 int type, olen;
2746 unsigned long val;
2747 struct l2cap_conf_rfc rfc;
2748
2749 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2750
2751 while (len >= L2CAP_CONF_OPT_SIZE) {
2752 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2753
2754 switch (type) {
2755 case L2CAP_CONF_MTU:
2756 if (val < L2CAP_DEFAULT_MIN_MTU) {
2757 *result = L2CAP_CONF_UNACCEPT;
2758 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2759 } else
2760 pi->omtu = val;
2761 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2762 break;
2763
2764 case L2CAP_CONF_FLUSH_TO:
2765 pi->flush_to = val;
2766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2767 2, pi->flush_to);
2768 break;
2769
2770 case L2CAP_CONF_RFC:
2771 if (olen == sizeof(rfc))
2772 memcpy(&rfc, (void *)val, olen);
2773
2774 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2775 rfc.mode != pi->mode)
2776 return -ECONNREFUSED;
2777
2778 pi->fcs = 0;
2779
2780 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2781 sizeof(rfc), (unsigned long) &rfc);
2782 break;
2783 }
2784 }
2785
2786 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2787 return -ECONNREFUSED;
2788
2789 pi->mode = rfc.mode;
2790
2791 if (*result == L2CAP_CONF_SUCCESS) {
2792 switch (rfc.mode) {
2793 case L2CAP_MODE_ERTM:
2794 pi->remote_tx_win = rfc.txwin_size;
2795 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2796 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2797 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2798 break;
2799 case L2CAP_MODE_STREAMING:
2800 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2801 }
2802 }
2803
2804 req->dcid = cpu_to_le16(pi->dcid);
2805 req->flags = cpu_to_le16(0x0000);
2806
2807 return ptr - data;
2808 }
2809
2810 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2811 {
2812 struct l2cap_conf_rsp *rsp = data;
2813 void *ptr = rsp->data;
2814
2815 BT_DBG("sk %p", sk);
2816
2817 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2818 rsp->result = cpu_to_le16(result);
2819 rsp->flags = cpu_to_le16(flags);
2820
2821 return ptr - data;
2822 }
2823
2824 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2825 {
2826 struct l2cap_pinfo *pi = l2cap_pi(sk);
2827 int type, olen;
2828 unsigned long val;
2829 struct l2cap_conf_rfc rfc;
2830
2831 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2832
2833 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2834 return;
2835
2836 while (len >= L2CAP_CONF_OPT_SIZE) {
2837 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2838
2839 switch (type) {
2840 case L2CAP_CONF_RFC:
2841 if (olen == sizeof(rfc))
2842 memcpy(&rfc, (void *)val, olen);
2843 goto done;
2844 }
2845 }
2846
2847 done:
2848 switch (rfc.mode) {
2849 case L2CAP_MODE_ERTM:
2850 pi->remote_tx_win = rfc.txwin_size;
2851 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2852 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2853 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2854 break;
2855 case L2CAP_MODE_STREAMING:
2856 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2857 }
2858 }
2859
2860 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2861 {
2862 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2863
2864 if (rej->reason != 0x0000)
2865 return 0;
2866
2867 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2868 cmd->ident == conn->info_ident) {
2869 del_timer(&conn->info_timer);
2870
2871 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2872 conn->info_ident = 0;
2873
2874 l2cap_conn_start(conn);
2875 }
2876
2877 return 0;
2878 }
2879
2880 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2881 {
2882 struct l2cap_chan_list *list = &conn->chan_list;
2883 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2884 struct l2cap_conn_rsp rsp;
2885 struct sock *parent, *uninitialized_var(sk);
2886 int result, status = L2CAP_CS_NO_INFO;
2887
2888 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2889 __le16 psm = req->psm;
2890
2891 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2892
2893 /* Check if we have socket listening on psm */
2894 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2895 if (!parent) {
2896 result = L2CAP_CR_BAD_PSM;
2897 goto sendresp;
2898 }
2899
2900 /* Check if the ACL is secure enough (if not SDP) */
2901 if (psm != cpu_to_le16(0x0001) &&
2902 !hci_conn_check_link_mode(conn->hcon)) {
2903 conn->disc_reason = 0x05;
2904 result = L2CAP_CR_SEC_BLOCK;
2905 goto response;
2906 }
2907
2908 result = L2CAP_CR_NO_MEM;
2909
2910 /* Check for backlog size */
2911 if (sk_acceptq_is_full(parent)) {
2912 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2913 goto response;
2914 }
2915
2916 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2917 if (!sk)
2918 goto response;
2919
2920 write_lock_bh(&list->lock);
2921
2922 /* Check if we already have channel with that dcid */
2923 if (__l2cap_get_chan_by_dcid(list, scid)) {
2924 write_unlock_bh(&list->lock);
2925 sock_set_flag(sk, SOCK_ZAPPED);
2926 l2cap_sock_kill(sk);
2927 goto response;
2928 }
2929
2930 hci_conn_hold(conn->hcon);
2931
2932 l2cap_sock_init(sk, parent);
2933 bacpy(&bt_sk(sk)->src, conn->src);
2934 bacpy(&bt_sk(sk)->dst, conn->dst);
2935 l2cap_pi(sk)->psm = psm;
2936 l2cap_pi(sk)->dcid = scid;
2937
2938 __l2cap_chan_add(conn, sk, parent);
2939 dcid = l2cap_pi(sk)->scid;
2940
2941 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2942
2943 l2cap_pi(sk)->ident = cmd->ident;
2944
2945 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2946 if (l2cap_check_security(sk)) {
2947 if (bt_sk(sk)->defer_setup) {
2948 sk->sk_state = BT_CONNECT2;
2949 result = L2CAP_CR_PEND;
2950 status = L2CAP_CS_AUTHOR_PEND;
2951 parent->sk_data_ready(parent, 0);
2952 } else {
2953 sk->sk_state = BT_CONFIG;
2954 result = L2CAP_CR_SUCCESS;
2955 status = L2CAP_CS_NO_INFO;
2956 }
2957 } else {
2958 sk->sk_state = BT_CONNECT2;
2959 result = L2CAP_CR_PEND;
2960 status = L2CAP_CS_AUTHEN_PEND;
2961 }
2962 } else {
2963 sk->sk_state = BT_CONNECT2;
2964 result = L2CAP_CR_PEND;
2965 status = L2CAP_CS_NO_INFO;
2966 }
2967
2968 write_unlock_bh(&list->lock);
2969
2970 response:
2971 bh_unlock_sock(parent);
2972
2973 sendresp:
2974 rsp.scid = cpu_to_le16(scid);
2975 rsp.dcid = cpu_to_le16(dcid);
2976 rsp.result = cpu_to_le16(result);
2977 rsp.status = cpu_to_le16(status);
2978 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2979
2980 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2981 struct l2cap_info_req info;
2982 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2983
2984 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2985 conn->info_ident = l2cap_get_ident(conn);
2986
2987 mod_timer(&conn->info_timer, jiffies +
2988 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2989
2990 l2cap_send_cmd(conn, conn->info_ident,
2991 L2CAP_INFO_REQ, sizeof(info), &info);
2992 }
2993
2994 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2995 result == L2CAP_CR_SUCCESS) {
2996 u8 buf[128];
2997 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2998 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2999 l2cap_build_conf_req(sk, buf), buf);
3000 l2cap_pi(sk)->num_conf_req++;
3001 }
3002
3003 return 0;
3004 }
3005
/* Handle a Connection Response to a Connection Request we sent.
 *
 * The channel is found by our source CID, or by the command ident while
 * the connection is still pending (no scid echoed yet).  The lookup
 * hands the socket back locked; it is released at the end. */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Accepted: record the peer's CID and start configuring. */
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Send the initial Configuration Request only once. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		/* Peer needs more time (e.g. pending authorization). */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* Refused: tear the channel down. */
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
3059
/* Handle an incoming Configuration Request.
 *
 * Option data may be split over several requests (continuation flag
 * 0x0001 in req->flags): fragments accumulate in conf_req and only the
 * final fragment is parsed and answered with a full response.  Once
 * both directions are configured the channel goes to BT_CONNECTED. */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Lookup hands the socket back locked; released at unlock:. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	/* Configuration is only valid in BT_CONFIG state. */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unrecoverable option mismatch: drop the channel. */
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions done: enable CRC16 FCS unless both
		 * sides opted out, reset sequence state, go connected. */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own Configuration Request has not gone out yet: send it. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3151
3152 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3153 {
3154 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3155 u16 scid, flags, result;
3156 struct sock *sk;
3157 int len = cmd->len - sizeof(*rsp);
3158
3159 scid = __le16_to_cpu(rsp->scid);
3160 flags = __le16_to_cpu(rsp->flags);
3161 result = __le16_to_cpu(rsp->result);
3162
3163 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3164 scid, flags, result);
3165
3166 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3167 if (!sk)
3168 return 0;
3169
3170 switch (result) {
3171 case L2CAP_CONF_SUCCESS:
3172 l2cap_conf_rfc_get(sk, rsp->data, len);
3173 break;
3174
3175 case L2CAP_CONF_UNACCEPT:
3176 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3177 char req[64];
3178
3179 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3180 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3181 goto done;
3182 }
3183
3184 /* throw out any old stored conf requests */
3185 result = L2CAP_CONF_SUCCESS;
3186 len = l2cap_parse_conf_rsp(sk, rsp->data,
3187 len, req, &result);
3188 if (len < 0) {
3189 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3190 goto done;
3191 }
3192
3193 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3194 L2CAP_CONF_REQ, len, req);
3195 l2cap_pi(sk)->num_conf_req++;
3196 if (result != L2CAP_CONF_SUCCESS)
3197 goto done;
3198 break;
3199 }
3200
3201 default:
3202 sk->sk_err = ECONNRESET;
3203 l2cap_sock_set_timer(sk, HZ * 5);
3204 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3205 goto done;
3206 }
3207
3208 if (flags & 0x01)
3209 goto done;
3210
3211 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3212
3213 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3214 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3215 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3216 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3217
3218 sk->sk_state = BT_CONNECTED;
3219 l2cap_pi(sk)->next_tx_seq = 0;
3220 l2cap_pi(sk)->expected_tx_seq = 0;
3221 __skb_queue_head_init(TX_QUEUE(sk));
3222 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3223 l2cap_ertm_init(sk);
3224
3225 l2cap_chan_ready(sk);
3226 }
3227
3228 done:
3229 bh_unlock_sock(sk);
3230 return 0;
3231 }
3232
/* Handle a Disconnection Request from the peer: acknowledge with a
 * Disconnection Response, then tear the channel down and free the
 * socket where possible. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid names our local channel; the lookup hands the
	 * socket back locked. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* No further reads or writes on this channel. */
	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3261
3262 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3263 {
3264 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3265 u16 dcid, scid;
3266 struct sock *sk;
3267
3268 scid = __le16_to_cpu(rsp->scid);
3269 dcid = __le16_to_cpu(rsp->dcid);
3270
3271 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3272
3273 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3274 if (!sk)
3275 return 0;
3276
3277 l2cap_chan_del(sk, 0);
3278 bh_unlock_sock(sk);
3279
3280 l2cap_sock_kill(sk);
3281 return 0;
3282 }
3283
3284 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3285 {
3286 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3287 u16 type;
3288
3289 type = __le16_to_cpu(req->type);
3290
3291 BT_DBG("type 0x%4.4x", type);
3292
3293 if (type == L2CAP_IT_FEAT_MASK) {
3294 u8 buf[8];
3295 u32 feat_mask = l2cap_feat_mask;
3296 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3297 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3298 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3299 if (enable_ertm)
3300 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3301 | L2CAP_FEAT_FCS;
3302 put_unaligned_le32(feat_mask, rsp->data);
3303 l2cap_send_cmd(conn, cmd->ident,
3304 L2CAP_INFO_RSP, sizeof(buf), buf);
3305 } else if (type == L2CAP_IT_FIXED_CHAN) {
3306 u8 buf[12];
3307 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3308 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3309 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3310 memcpy(buf + 4, l2cap_fixed_chan, 8);
3311 l2cap_send_cmd(conn, cmd->ident,
3312 L2CAP_INFO_RSP, sizeof(buf), buf);
3313 } else {
3314 struct l2cap_info_rsp rsp;
3315 rsp.type = cpu_to_le16(type);
3316 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3317 l2cap_send_cmd(conn, cmd->ident,
3318 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3319 }
3320
3321 return 0;
3322 }
3323
3324 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3325 {
3326 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3327 u16 type, result;
3328
3329 type = __le16_to_cpu(rsp->type);
3330 result = __le16_to_cpu(rsp->result);
3331
3332 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3333
3334 del_timer(&conn->info_timer);
3335
3336 if (type == L2CAP_IT_FEAT_MASK) {
3337 conn->feat_mask = get_unaligned_le32(rsp->data);
3338
3339 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3340 struct l2cap_info_req req;
3341 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3342
3343 conn->info_ident = l2cap_get_ident(conn);
3344
3345 l2cap_send_cmd(conn, conn->info_ident,
3346 L2CAP_INFO_REQ, sizeof(req), &req);
3347 } else {
3348 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3349 conn->info_ident = 0;
3350
3351 l2cap_conn_start(conn);
3352 }
3353 } else if (type == L2CAP_IT_FIXED_CHAN) {
3354 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3355 conn->info_ident = 0;
3356
3357 l2cap_conn_start(conn);
3358 }
3359
3360 return 0;
3361 }
3362
/* Parse and dispatch all signalling commands contained in @skb.
 *
 * Commands are handled in order; a handler returning an error is
 * answered with a Command Reject.  The skb is consumed. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Mirror the raw signalling traffic to raw sockets first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length running past the packet or a zero ident means
		 * the packet is corrupted: stop parsing it. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3452
3453 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3454 {
3455 u16 our_fcs, rcv_fcs;
3456 int hdr_size = L2CAP_HDR_SIZE + 2;
3457
3458 if (pi->fcs == L2CAP_FCS_CRC16) {
3459 skb_trim(skb, skb->len - 2);
3460 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3461 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3462
3463 if (our_fcs != rcv_fcs)
3464 return -EBADMSG;
3465 }
3466 return 0;
3467 }
3468
/* Respond to a poll from the peer: flush pending I-frames when
 * possible, otherwise report the local receiver state with an RR or
 * RNR supervisory frame. */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	/* Locally busy: tell the peer we cannot receive right now. */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	/* Nothing was sent and we are not busy: answer with a plain RR
	 * so the peer still gets an acknowledgement. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3495
/* Queue an out-of-sequence I-frame for later in-order delivery.
 *
 * The SREJ queue is kept sorted by each frame's distance from
 * buffer_seq in the modulo-64 sequence space.  Returns -EINVAL when a
 * frame with the same tx_seq is already queued (duplicate), else 0. */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	/* Normalize to a non-negative offset from buffer_seq so the
	 * ordering survives sequence-number wrap-around. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame beyond ours: insert before it. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	/* Largest offset seen so far: append at the tail. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
3538
/* Reassemble an incoming ERTM I-frame into an SDU according to its SAR
 * bits and deliver completed SDUs to the socket receive queue.
 *
 * Returns 0 when the frame was consumed (delivered, buffered, dropped
 * or the channel was disconnected on a protocol violation), or a
 * negative error (-ENOMEM / queueing error) telling the caller to
 * enter the local busy state and retry the same frame later. */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A whole SDU may not arrive in the middle of a
		 * segmented one. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* The first two payload bytes carry the SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry (a previous clone/queue attempt failed) the
		 * last fragment was already appended: do not re-add it. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			/* Total length must match the announced SDU length. */
			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	/* Discard the partial SDU; note this deliberately falls through
	 * and also disconnects the channel below. */
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3646
/* Try to drain the busy queue back through SDU reassembly.
 *
 * Returns -EBUSY (the failing frame is put back at the head of the
 * queue) if the receive path is still congested, or 0 once the queue
 * is empty and the local busy condition has been cleared. */
static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			/* Still congested: keep the frame for next time. */
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We previously told the peer we were busy (RNR): send an RR
	 * with the poll bit to resume the flow and await the F-bit. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}
3686
/* Work item run while the channel is locally busy: keep retrying to
 * push the queued frames until they fit, the socket errors out, a
 * signal arrives, or the retry budget is exhausted (then the channel
 * is disconnected). */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Too many attempts: give up and drop the channel. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep with the socket released so the reader can make
		 * room in the receive queue. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3733
3734 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3735 {
3736 struct l2cap_pinfo *pi = l2cap_pi(sk);
3737 int sctrl, err;
3738
3739 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3740 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3741 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3742 return l2cap_try_push_rx_skb(sk);
3743
3744
3745 }
3746
3747 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3748 if (err >= 0) {
3749 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3750 return err;
3751 }
3752
3753 /* Busy Condition */
3754 BT_DBG("sk %p, Enter local busy", sk);
3755
3756 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3757 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3758 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3759
3760 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3761 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3762 l2cap_send_sframe(pi, sctrl);
3763
3764 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3765
3766 del_timer(&pi->ack_timer);
3767
3768 queue_work(_busy_wq, &pi->busy_work);
3769
3770 return err;
3771 }
3772
3773 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3774 {
3775 struct l2cap_pinfo *pi = l2cap_pi(sk);
3776 struct sk_buff *_skb;
3777 int err = -EINVAL;
3778
3779 /*
3780 * TODO: We have to notify the userland if some data is lost with the
3781 * Streaming Mode.
3782 */
3783
3784 switch (control & L2CAP_CTRL_SAR) {
3785 case L2CAP_SDU_UNSEGMENTED:
3786 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3787 kfree_skb(pi->sdu);
3788 break;
3789 }
3790
3791 err = sock_queue_rcv_skb(sk, skb);
3792 if (!err)
3793 return 0;
3794
3795 break;
3796
3797 case L2CAP_SDU_START:
3798 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3799 kfree_skb(pi->sdu);
3800 break;
3801 }
3802
3803 pi->sdu_len = get_unaligned_le16(skb->data);
3804 skb_pull(skb, 2);
3805
3806 if (pi->sdu_len > pi->imtu) {
3807 err = -EMSGSIZE;
3808 break;
3809 }
3810
3811 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3812 if (!pi->sdu) {
3813 err = -ENOMEM;
3814 break;
3815 }
3816
3817 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3818
3819 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3820 pi->partial_sdu_len = skb->len;
3821 err = 0;
3822 break;
3823
3824 case L2CAP_SDU_CONTINUE:
3825 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3826 break;
3827
3828 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3829
3830 pi->partial_sdu_len += skb->len;
3831 if (pi->partial_sdu_len > pi->sdu_len)
3832 kfree_skb(pi->sdu);
3833 else
3834 err = 0;
3835
3836 break;
3837
3838 case L2CAP_SDU_END:
3839 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3840 break;
3841
3842 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3843
3844 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3845 pi->partial_sdu_len += skb->len;
3846
3847 if (pi->partial_sdu_len > pi->imtu)
3848 goto drop;
3849
3850 if (pi->partial_sdu_len == pi->sdu_len) {
3851 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3852 err = sock_queue_rcv_skb(sk, _skb);
3853 if (err < 0)
3854 kfree_skb(_skb);
3855 }
3856 err = 0;
3857
3858 drop:
3859 kfree_skb(pi->sdu);
3860 break;
3861 }
3862
3863 kfree_skb(skb);
3864 return err;
3865 }
3866
3867 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3868 {
3869 struct sk_buff *skb;
3870 u16 control;
3871
3872 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3873 if (bt_cb(skb)->tx_seq != tx_seq)
3874 break;
3875
3876 skb = skb_dequeue(SREJ_QUEUE(sk));
3877 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3878 l2cap_ertm_reassembly_sdu(sk, skb, control);
3879 l2cap_pi(sk)->buffer_seq_srej =
3880 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3881 tx_seq = (tx_seq + 1) % 64;
3882 }
3883 }
3884
3885 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3886 {
3887 struct l2cap_pinfo *pi = l2cap_pi(sk);
3888 struct srej_list *l, *tmp;
3889 u16 control;
3890
3891 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3892 if (l->tx_seq == tx_seq) {
3893 list_del(&l->list);
3894 kfree(l);
3895 return;
3896 }
3897 control = L2CAP_SUPER_SELECT_REJECT;
3898 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3899 l2cap_send_sframe(pi, control);
3900 list_del(&l->list);
3901 list_add_tail(&l->list, SREJ_LIST(sk));
3902 }
3903 }
3904
3905 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3906 {
3907 struct l2cap_pinfo *pi = l2cap_pi(sk);
3908 struct srej_list *new;
3909 u16 control;
3910
3911 while (tx_seq != pi->expected_tx_seq) {
3912 control = L2CAP_SUPER_SELECT_REJECT;
3913 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3914 l2cap_send_sframe(pi, control);
3915
3916 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3917 new->tx_seq = pi->expected_tx_seq;
3918 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3919 list_add_tail(&new->list, SREJ_LIST(sk));
3920 }
3921 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3922 }
3923
3924 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3925 {
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3927 u8 tx_seq = __get_txseq(rx_control);
3928 u8 req_seq = __get_reqseq(rx_control);
3929 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3930 int tx_seq_offset, expected_tx_seq_offset;
3931 int num_to_ack = (pi->tx_win/6) + 1;
3932 int err = 0;
3933
3934 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3935 rx_control);
3936
3937 if (L2CAP_CTRL_FINAL & rx_control &&
3938 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3939 del_timer(&pi->monitor_timer);
3940 if (pi->unacked_frames > 0)
3941 __mod_retrans_timer();
3942 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3943 }
3944
3945 pi->expected_ack_seq = req_seq;
3946 l2cap_drop_acked_frames(sk);
3947
3948 if (tx_seq == pi->expected_tx_seq)
3949 goto expected;
3950
3951 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3952 if (tx_seq_offset < 0)
3953 tx_seq_offset += 64;
3954
3955 /* invalid tx_seq */
3956 if (tx_seq_offset >= pi->tx_win) {
3957 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3958 goto drop;
3959 }
3960
3961 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3962 goto drop;
3963
3964 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3965 struct srej_list *first;
3966
3967 first = list_first_entry(SREJ_LIST(sk),
3968 struct srej_list, list);
3969 if (tx_seq == first->tx_seq) {
3970 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3971 l2cap_check_srej_gap(sk, tx_seq);
3972
3973 list_del(&first->list);
3974 kfree(first);
3975
3976 if (list_empty(SREJ_LIST(sk))) {
3977 pi->buffer_seq = pi->buffer_seq_srej;
3978 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3979 l2cap_send_ack(pi);
3980 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3981 }
3982 } else {
3983 struct srej_list *l;
3984
3985 /* duplicated tx_seq */
3986 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3987 goto drop;
3988
3989 list_for_each_entry(l, SREJ_LIST(sk), list) {
3990 if (l->tx_seq == tx_seq) {
3991 l2cap_resend_srejframe(sk, tx_seq);
3992 return 0;
3993 }
3994 }
3995 l2cap_send_srejframe(sk, tx_seq);
3996 }
3997 } else {
3998 expected_tx_seq_offset =
3999 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4000 if (expected_tx_seq_offset < 0)
4001 expected_tx_seq_offset += 64;
4002
4003 /* duplicated tx_seq */
4004 if (tx_seq_offset < expected_tx_seq_offset)
4005 goto drop;
4006
4007 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4008
4009 BT_DBG("sk %p, Enter SREJ", sk);
4010
4011 INIT_LIST_HEAD(SREJ_LIST(sk));
4012 pi->buffer_seq_srej = pi->buffer_seq;
4013
4014 __skb_queue_head_init(SREJ_QUEUE(sk));
4015 __skb_queue_head_init(BUSY_QUEUE(sk));
4016 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4017
4018 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4019
4020 l2cap_send_srejframe(sk, tx_seq);
4021
4022 del_timer(&pi->ack_timer);
4023 }
4024 return 0;
4025
4026 expected:
4027 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4028
4029 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4030 bt_cb(skb)->tx_seq = tx_seq;
4031 bt_cb(skb)->sar = sar;
4032 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4033 return 0;
4034 }
4035
4036 err = l2cap_push_rx_skb(sk, skb, rx_control);
4037 if (err < 0)
4038 return 0;
4039
4040 if (rx_control & L2CAP_CTRL_FINAL) {
4041 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4042 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4043 else
4044 l2cap_retransmit_frames(sk);
4045 }
4046
4047 __mod_ack_timer();
4048
4049 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4050 if (pi->num_acked == num_to_ack - 1)
4051 l2cap_send_ack(pi);
4052
4053 return 0;
4054
4055 drop:
4056 kfree_skb(skb);
4057 return 0;
4058 }
4059
/* Handle a received RR (Receiver Ready) S-frame.
 *
 * Acknowledges frames up to the carried ReqSeq.  A P-bit obliges us to
 * answer with the F-bit set (srejtail while in SREJ recovery, otherwise
 * an I/RR/RNR frame).  An F-bit answers our earlier poll: retransmit
 * unless a REJ already triggered the retransmission (REJ_ACT).  A plain
 * RR clears the remote-busy state and resumes transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			/* peer was busy and has recovered: restart the
			 * retransmission timer if frames are outstanding */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* skip the retransmit if a REJ already caused it */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}
4104
4105 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4106 {
4107 struct l2cap_pinfo *pi = l2cap_pi(sk);
4108 u8 tx_seq = __get_reqseq(rx_control);
4109
4110 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4111
4112 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4113
4114 pi->expected_ack_seq = tx_seq;
4115 l2cap_drop_acked_frames(sk);
4116
4117 if (rx_control & L2CAP_CTRL_FINAL) {
4118 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4119 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4120 else
4121 l2cap_retransmit_frames(sk);
4122 } else {
4123 l2cap_retransmit_frames(sk);
4124
4125 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4126 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4127 }
4128 }
4129 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4130 {
4131 struct l2cap_pinfo *pi = l2cap_pi(sk);
4132 u8 tx_seq = __get_reqseq(rx_control);
4133
4134 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4135
4136 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4137
4138 if (rx_control & L2CAP_CTRL_POLL) {
4139 pi->expected_ack_seq = tx_seq;
4140 l2cap_drop_acked_frames(sk);
4141
4142 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4143 l2cap_retransmit_one_frame(sk, tx_seq);
4144
4145 l2cap_ertm_send(sk);
4146
4147 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4148 pi->srej_save_reqseq = tx_seq;
4149 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4150 }
4151 } else if (rx_control & L2CAP_CTRL_FINAL) {
4152 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4153 pi->srej_save_reqseq == tx_seq)
4154 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4155 else
4156 l2cap_retransmit_one_frame(sk, tx_seq);
4157 } else {
4158 l2cap_retransmit_one_frame(sk, tx_seq);
4159 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4160 pi->srej_save_reqseq = tx_seq;
4161 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4162 }
4163 }
4164 }
4165
4166 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4167 {
4168 struct l2cap_pinfo *pi = l2cap_pi(sk);
4169 u8 tx_seq = __get_reqseq(rx_control);
4170
4171 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4172
4173 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4174 pi->expected_ack_seq = tx_seq;
4175 l2cap_drop_acked_frames(sk);
4176
4177 if (rx_control & L2CAP_CTRL_POLL)
4178 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4179
4180 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4181 del_timer(&pi->retrans_timer);
4182 if (rx_control & L2CAP_CTRL_POLL)
4183 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4184 return;
4185 }
4186
4187 if (rx_control & L2CAP_CTRL_POLL)
4188 l2cap_send_srejtail(sk);
4189 else
4190 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4191 }
4192
4193 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4194 {
4195 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4196
4197 if (L2CAP_CTRL_FINAL & rx_control &&
4198 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4199 del_timer(&l2cap_pi(sk)->monitor_timer);
4200 if (l2cap_pi(sk)->unacked_frames > 0)
4201 __mod_retrans_timer();
4202 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4203 }
4204
4205 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4206 case L2CAP_SUPER_RCV_READY:
4207 l2cap_data_channel_rrframe(sk, rx_control);
4208 break;
4209
4210 case L2CAP_SUPER_REJECT:
4211 l2cap_data_channel_rejframe(sk, rx_control);
4212 break;
4213
4214 case L2CAP_SUPER_SELECT_REJECT:
4215 l2cap_data_channel_srejframe(sk, rx_control);
4216 break;
4217
4218 case L2CAP_SUPER_RCV_NOT_READY:
4219 l2cap_data_channel_rnrframe(sk, rx_control);
4220 break;
4221 }
4222
4223 kfree_skb(skb);
4224 return 0;
4225 }
4226
/* Entry point for a frame received on an ERTM channel.
 *
 * Strips the 16-bit control field, validates FCS and payload length,
 * checks the piggy-backed ReqSeq, then dispatches to the I-frame or
 * S-frame handler.  Always consumes @skb; returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	/* an SDU-start I-frame carries a 2-byte SDU length field */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	/* trailing CRC16 is not payload */
	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* negative len means the frame was shorter than its
		 * mandatory fields */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
4296
/* Route an inbound frame to the channel identified by @cid, handling it
 * according to the channel's mode (Basic / ERTM / Streaming).
 *
 * NOTE(review): l2cap_get_chan_by_scid appears to return the socket
 * bh-locked -- the done label unlocks it when a socket was found;
 * confirm against that helper's definition.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* defer to the backlog if the socket is owned by a
		 * process; the frame is processed on release */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* SDU-start frames carry a 2-byte SDU length field */
		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* streaming mode: just resynchronize on any sequence gap */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
4384
4385 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4386 {
4387 struct sock *sk;
4388
4389 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4390 if (!sk)
4391 goto drop;
4392
4393 BT_DBG("sk %p, len %d", sk, skb->len);
4394
4395 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4396 goto drop;
4397
4398 if (l2cap_pi(sk)->imtu < skb->len)
4399 goto drop;
4400
4401 if (!sock_queue_rcv_skb(sk, skb))
4402 goto done;
4403
4404 drop:
4405 kfree_skb(skb);
4406
4407 done:
4408 if (sk)
4409 bh_unlock_sock(sk);
4410 return 0;
4411 }
4412
4413 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4414 {
4415 struct l2cap_hdr *lh = (void *) skb->data;
4416 u16 cid, len;
4417 __le16 psm;
4418
4419 skb_pull(skb, L2CAP_HDR_SIZE);
4420 cid = __le16_to_cpu(lh->cid);
4421 len = __le16_to_cpu(lh->len);
4422
4423 if (len != skb->len) {
4424 kfree_skb(skb);
4425 return;
4426 }
4427
4428 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4429
4430 switch (cid) {
4431 case L2CAP_CID_SIGNALING:
4432 l2cap_sig_channel(conn, skb);
4433 break;
4434
4435 case L2CAP_CID_CONN_LESS:
4436 psm = get_unaligned_le16(skb->data);
4437 skb_pull(skb, 2);
4438 l2cap_conless_channel(conn, psm, skb);
4439 break;
4440
4441 default:
4442 l2cap_data_channel(conn, cid, skb);
4443 break;
4444 }
4445 }
4446
4447 /* ---- L2CAP interface with lower layer (HCI) ---- */
4448
/* HCI callback: incoming ACL connection request from @bdaddr.
 *
 * Scans listening L2CAP sockets under the list lock.  A socket bound to
 * this adapter's own address takes precedence (exact match) over
 * wildcard BDADDR_ANY listeners.  Returns the accumulated link-mode
 * flags (accept / master) telling HCI whether and how to accept.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* bound to this adapter's address: exact match */
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* wildcard listener: used only if no exact match */
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
}
4481
4482 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4483 {
4484 struct l2cap_conn *conn;
4485
4486 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4487
4488 if (hcon->type != ACL_LINK)
4489 return -EINVAL;
4490
4491 if (!status) {
4492 conn = l2cap_conn_add(hcon, status);
4493 if (conn)
4494 l2cap_conn_ready(conn);
4495 } else
4496 l2cap_conn_del(hcon, bt_err(status));
4497
4498 return 0;
4499 }
4500
4501 static int l2cap_disconn_ind(struct hci_conn *hcon)
4502 {
4503 struct l2cap_conn *conn = hcon->l2cap_data;
4504
4505 BT_DBG("hcon %p", hcon);
4506
4507 if (hcon->type != ACL_LINK || !conn)
4508 return 0x13;
4509
4510 return conn->disc_reason;
4511 }
4512
4513 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4514 {
4515 BT_DBG("hcon %p reason %d", hcon, reason);
4516
4517 if (hcon->type != ACL_LINK)
4518 return -EINVAL;
4519
4520 l2cap_conn_del(hcon, bt_err(reason));
4521
4522 return 0;
4523 }
4524
4525 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4526 {
4527 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4528 return;
4529
4530 if (encrypt == 0x00) {
4531 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4532 l2cap_sock_clear_timer(sk);
4533 l2cap_sock_set_timer(sk, HZ * 5);
4534 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4535 __l2cap_sock_close(sk, ECONNREFUSED);
4536 } else {
4537 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4538 l2cap_sock_clear_timer(sk);
4539 }
4540 }
4541
/* HCI callback: a security (authentication/encryption) procedure
 * finished on the link.
 *
 * Walks every channel on the connection: connected/configuring
 * channels get an encryption re-check; channels waiting to connect
 * (BT_CONNECT) proceed with the Connection Request on success or are
 * put on a short kill timer on failure; channels pending an answer
 * (BT_CONNECT2) send the deferred Connection Response, accepting or
 * blocking on security grounds.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* a connect attempt is already in flight for this channel */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* security done: issue the deferred
				 * Connection Request */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm  = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* security failed: let the timer kill it */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4615
/* HCI callback: an ACL data packet arrived for this link.
 *
 * Reassembles L2CAP frames out of ACL fragments.  An ACL_START packet
 * that carries a whole L2CAP frame is dispatched directly; otherwise a
 * reassembly buffer (conn->rx_skb / conn->rx_len) collects the
 * continuation fragments until the length from the L2CAP header is
 * satisfied.  Any framing inconsistency discards the partial frame and
 * marks the connection unreliable.  Always consumes @skb.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* a new start while reassembly is pending: discard the
		 * old partial frame */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* need at least the 2-byte length field */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* continuation without a pending start */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4703
4704 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4705 {
4706 struct sock *sk;
4707 struct hlist_node *node;
4708
4709 read_lock_bh(&l2cap_sk_list.lock);
4710
4711 sk_for_each(sk, node, &l2cap_sk_list.head) {
4712 struct l2cap_pinfo *pi = l2cap_pi(sk);
4713
4714 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4715 batostr(&bt_sk(sk)->src),
4716 batostr(&bt_sk(sk)->dst),
4717 sk->sk_state, __le16_to_cpu(pi->psm),
4718 pi->scid, pi->dcid,
4719 pi->imtu, pi->omtu, pi->sec_level);
4720 }
4721
4722 read_unlock_bh(&l2cap_sk_list.lock);
4723
4724 return 0;
4725 }
4726
/* debugfs open: bind the single-shot seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4731
/* file_operations for the read-only /sys/kernel/debug/.../l2cap file */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4738
4739 static struct dentry *l2cap_debugfs;
4740
/* proto_ops for BTPROTO_L2CAP sockets: L2CAP-specific handlers plus
 * generic bt_sock_* helpers (poll/ioctl) and sock_no_* stubs for the
 * unsupported mmap/socketpair operations. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};
4760
/* AF_BLUETOOTH sub-family registration: socket(2) creation hook */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4766
/* HCI protocol hooks: how the HCI core notifies L2CAP about link
 * events and delivers inbound ACL data */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4777
4778 static int __init l2cap_init(void)
4779 {
4780 int err;
4781
4782 err = proto_register(&l2cap_proto, 0);
4783 if (err < 0)
4784 return err;
4785
4786 _busy_wq = create_singlethread_workqueue("l2cap");
4787 if (!_busy_wq)
4788 goto error;
4789
4790 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4791 if (err < 0) {
4792 BT_ERR("L2CAP socket registration failed");
4793 goto error;
4794 }
4795
4796 err = hci_register_proto(&l2cap_hci_proto);
4797 if (err < 0) {
4798 BT_ERR("L2CAP protocol registration failed");
4799 bt_sock_unregister(BTPROTO_L2CAP);
4800 goto error;
4801 }
4802
4803 if (bt_debugfs) {
4804 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4805 bt_debugfs, NULL, &l2cap_debugfs_fops);
4806 if (!l2cap_debugfs)
4807 BT_ERR("Failed to create L2CAP debug file");
4808 }
4809
4810 BT_INFO("L2CAP ver %s", VERSION);
4811 BT_INFO("L2CAP socket layer initialized");
4812
4813 return 0;
4814
4815 error:
4816 proto_unregister(&l2cap_proto);
4817 return err;
4818 }
4819
/* Module unload: tear everything down.  The busy workqueue is flushed
 * before destruction so no queued busy-work item runs against a
 * half-unregistered protocol. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4835
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4843
4844 module_init(l2cap_init);
4845 module_exit(l2cap_exit);
4846
4847 module_param(enable_ertm, bool, 0644);
4848 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4849
4850 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4851 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4852 MODULE_VERSION(VERSION);
4853 MODULE_LICENSE("GPL");
4854 MODULE_ALIAS("bt-proto-0");