net/bluetooth/l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
52
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
57
58 #define VERSION "2.10"
59
60 static u32 l2cap_feat_mask = 0x0000;
61
62 static const struct proto_ops l2cap_sock_ops;
63
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66 };
67
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
71
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
74
75 /* ---- L2CAP timers ---- */
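/*
 * Socket timer expiry: a connect that was still waiting on link-level
 * security is failed with ECONNREFUSED, everything else with ETIMEDOUT.
 */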
76 static void l2cap_sock_timeout(unsigned long arg)
77 {
78 struct sock *sk = (struct sock *) arg;
79 int reason;
80
81 BT_DBG("sock %p state %d", sk, sk->sk_state);
82
83 bh_lock_sock(sk);
84
85 if (sk->sk_state == BT_CONNECT &&
86 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
87 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
88 reason = ECONNREFUSED;
89 else
90 reason = ETIMEDOUT;
91
92 __l2cap_sock_close(sk, reason);
93
94 bh_unlock_sock(sk);
95
96 l2cap_sock_kill(sk);
97 sock_put(sk);
98 }
99
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 {
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
104 }
105
106 static void l2cap_sock_clear_timer(struct sock *sk)
107 {
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
110 }
111
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
114 {
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
119 }
120 return s;
121 }
122
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
124 {
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
129 }
130 return s;
131 }
132
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 {
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s) bh_lock_sock(s);
141 read_unlock(&l->lock);
142 return s;
143 }
144
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
146 {
147 struct sock *s;
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
150 break;
151 }
152 return s;
153 }
154
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
156 {
157 struct sock *s;
158 read_lock(&l->lock);
159 s = __l2cap_get_chan_by_ident(l, ident);
160 if (s) bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
163 }
164
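/* Allocate the first free dynamic CID (0x0040 and up); 0 means none left. */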
165 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
166 {
167 u16 cid = 0x0040;
168
169 for (; cid < 0xffff; cid++) {
170 if (!__l2cap_get_chan_by_scid(l, cid))
171 return cid;
172 }
173
174 return 0;
175 }
176
177 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
178 {
179 sock_hold(sk);
180
181 if (l->head)
182 l2cap_pi(l->head)->prev_c = sk;
183
184 l2cap_pi(sk)->next_c = l->head;
185 l2cap_pi(sk)->prev_c = NULL;
186 l->head = sk;
187 }
188
189 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
190 {
191 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
192
193 write_lock_bh(&l->lock);
194 if (sk == l->head)
195 l->head = next;
196
197 if (next)
198 l2cap_pi(next)->prev_c = prev;
199 if (prev)
200 l2cap_pi(prev)->next_c = next;
201 write_unlock_bh(&l->lock);
202
203 __sock_put(sk);
204 }
205
206 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
207 {
208 struct l2cap_chan_list *l = &conn->chan_list;
209
210 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
211
212 l2cap_pi(sk)->conn = conn;
213
214 if (sk->sk_type == SOCK_SEQPACKET) {
215 /* Alloc CID for connection-oriented socket */
216 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
217 } else if (sk->sk_type == SOCK_DGRAM) {
218 /* Connectionless socket */
219 l2cap_pi(sk)->scid = 0x0002;
220 l2cap_pi(sk)->dcid = 0x0002;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
222 } else {
223 /* Raw socket can send/recv signalling messages only */
224 l2cap_pi(sk)->scid = 0x0001;
225 l2cap_pi(sk)->dcid = 0x0001;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 }
228
229 __l2cap_chan_link(l, sk);
230
231 if (parent)
232 bt_accept_enqueue(parent, sk);
233 }
234
235 /* Delete channel.
236 * Must be called on the locked socket. */
237 static void l2cap_chan_del(struct sock *sk, int err)
238 {
239 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
240 struct sock *parent = bt_sk(sk)->parent;
241
242 l2cap_sock_clear_timer(sk);
243
244 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
245
246 if (conn) {
247 /* Unlink from channel list */
248 l2cap_chan_unlink(&conn->chan_list, sk);
249 l2cap_pi(sk)->conn = NULL;
250 hci_conn_put(conn->hcon);
251 }
252
253 sk->sk_state = BT_CLOSED;
254 sock_set_flag(sk, SOCK_ZAPPED);
255
256 if (err)
257 sk->sk_err = err;
258
259 if (parent) {
260 bt_accept_unlink(sk);
261 parent->sk_data_ready(parent, 0);
262 } else
263 sk->sk_state_change(sk);
264 }
265
266 /* Service level security */
267 static inline int l2cap_check_link_mode(struct sock *sk)
268 {
269 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
270
271 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
272 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
273 return hci_conn_encrypt(conn->hcon);
274
275 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
276 return hci_conn_auth(conn->hcon);
277
278 return 1;
279 }
280
281 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
282 {
283 u8 id;
284
285 /* Get next available identifier.
286 * 1 - 128 are used by the kernel.
287 * 129 - 199 are reserved.
288 * 200 - 254 are used by utilities like l2ping, etc.
289 */
290
291 spin_lock_bh(&conn->lock);
292
293 if (++conn->tx_ident > 128)
294 conn->tx_ident = 1;
295
296 id = conn->tx_ident;
297
298 spin_unlock_bh(&conn->lock);
299
300 return id;
301 }
302
303 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
304 {
305 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
306
307 BT_DBG("code 0x%2.2x", code);
308
309 if (!skb)
310 return -ENOMEM;
311
312 return hci_send_acl(conn->hcon, skb, 0);
313 }
314
315 static void l2cap_do_start(struct sock *sk)
316 {
317 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
318
319 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
320 if (l2cap_check_link_mode(sk)) {
321 struct l2cap_conn_req req;
322 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
323 req.psm = l2cap_pi(sk)->psm;
324
325 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
326
327 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
328 L2CAP_CONN_REQ, sizeof(req), &req);
329 }
330 } else {
331 struct l2cap_info_req req;
332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
333
334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
335 conn->info_ident = l2cap_get_ident(conn);
336
337 mod_timer(&conn->info_timer, jiffies +
338 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
339
340 l2cap_send_cmd(conn, conn->info_ident,
341 L2CAP_INFO_REQ, sizeof(req), &req);
342 }
343 }
344
345 /* ---- L2CAP connections ---- */
346 static void l2cap_conn_start(struct l2cap_conn *conn)
347 {
348 struct l2cap_chan_list *l = &conn->chan_list;
349 struct sock *sk;
350
351 BT_DBG("conn %p", conn);
352
353 read_lock(&l->lock);
354
355 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
356 bh_lock_sock(sk);
357
358 if (sk->sk_type != SOCK_SEQPACKET) {
359 bh_unlock_sock(sk);
360 continue;
361 }
362
363 if (sk->sk_state == BT_CONNECT) {
364 if (l2cap_check_link_mode(sk)) {
365 struct l2cap_conn_req req;
366 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
367 req.psm = l2cap_pi(sk)->psm;
368
369 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
370
371 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
372 L2CAP_CONN_REQ, sizeof(req), &req);
373 }
374 } else if (sk->sk_state == BT_CONNECT2) {
375 struct l2cap_conn_rsp rsp;
376 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
377 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
378
379 if (l2cap_check_link_mode(sk)) {
380 sk->sk_state = BT_CONFIG;
381 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
382 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
383 } else {
384 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
385 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
386 }
387
388 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
389 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
390 }
391
392 bh_unlock_sock(sk);
393 }
394
395 read_unlock(&l->lock);
396 }
397
398 static void l2cap_conn_ready(struct l2cap_conn *conn)
399 {
400 struct l2cap_chan_list *l = &conn->chan_list;
401 struct sock *sk;
402
403 BT_DBG("conn %p", conn);
404
405 read_lock(&l->lock);
406
407 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
408 bh_lock_sock(sk);
409
410 if (sk->sk_type != SOCK_SEQPACKET) {
411 l2cap_sock_clear_timer(sk);
412 sk->sk_state = BT_CONNECTED;
413 sk->sk_state_change(sk);
414 } else if (sk->sk_state == BT_CONNECT)
415 l2cap_do_start(sk);
416
417 bh_unlock_sock(sk);
418 }
419
420 read_unlock(&l->lock);
421 }
422
423 /* Notify sockets that we cannot guarantee reliability anymore */
424 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
425 {
426 struct l2cap_chan_list *l = &conn->chan_list;
427 struct sock *sk;
428
429 BT_DBG("conn %p", conn);
430
431 read_lock(&l->lock);
432
433 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
434 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
435 sk->sk_err = err;
436 }
437
438 read_unlock(&l->lock);
439 }
440
441 static void l2cap_info_timeout(unsigned long arg)
442 {
443 struct l2cap_conn *conn = (void *) arg;
444
445 conn->info_ident = 0;
446
447 l2cap_conn_start(conn);
448 }
449
450 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
451 {
452 struct l2cap_conn *conn = hcon->l2cap_data;
453
454 if (conn || status)
455 return conn;
456
457 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
458 if (!conn)
459 return NULL;
460
461 hcon->l2cap_data = conn;
462 conn->hcon = hcon;
463
464 BT_DBG("hcon %p conn %p", hcon, conn);
465
466 conn->mtu = hcon->hdev->acl_mtu;
467 conn->src = &hcon->hdev->bdaddr;
468 conn->dst = &hcon->dst;
469
470 conn->feat_mask = 0;
471
472 setup_timer(&conn->info_timer, l2cap_info_timeout,
473 (unsigned long) conn);
474
475 spin_lock_init(&conn->lock);
476 rwlock_init(&conn->chan_list.lock);
477
478 return conn;
479 }
480
481 static void l2cap_conn_del(struct hci_conn *hcon, int err)
482 {
483 struct l2cap_conn *conn = hcon->l2cap_data;
484 struct sock *sk;
485
486 if (!conn)
487 return;
488
489 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
490
491 if (conn->rx_skb)
492 kfree_skb(conn->rx_skb);
493
494 /* Kill channels */
495 while ((sk = conn->chan_list.head)) {
496 bh_lock_sock(sk);
497 l2cap_chan_del(sk, err);
498 bh_unlock_sock(sk);
499 l2cap_sock_kill(sk);
500 }
501
502 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
503 del_timer_sync(&conn->info_timer);
504
505 hcon->l2cap_data = NULL;
506 kfree(conn);
507 }
508
509 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
510 {
511 struct l2cap_chan_list *l = &conn->chan_list;
512 write_lock_bh(&l->lock);
513 __l2cap_chan_add(conn, sk, parent);
514 write_unlock_bh(&l->lock);
515 }
516
517 /* ---- Socket interface ---- */
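/*
 * Illustrative user-space sketch (not part of the kernel build): a client
 * of this socket interface would, assuming the usual BlueZ userland
 * headers <bluetooth/bluetooth.h> and <bluetooth/l2cap.h>, do roughly:
 *
 *	int s = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	struct sockaddr_l2 addr = { 0 };
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(s, (struct sockaddr *) &addr, sizeof(addr));
 *
 * bind()/listen()/accept() take the same sockaddr_l2 layout on the
 * server side.
 */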
518 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
519 {
520 struct sock *sk;
521 struct hlist_node *node;
522 sk_for_each(sk, node, &l2cap_sk_list.head)
523 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
524 goto found;
525 sk = NULL;
526 found:
527 return sk;
528 }
529
530 /* Find socket with psm and source bdaddr.
531 * Returns closest match.
532 */
533 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
534 {
535 struct sock *sk = NULL, *sk1 = NULL;
536 struct hlist_node *node;
537
538 sk_for_each(sk, node, &l2cap_sk_list.head) {
539 if (state && sk->sk_state != state)
540 continue;
541
542 if (l2cap_pi(sk)->psm == psm) {
543 /* Exact match. */
544 if (!bacmp(&bt_sk(sk)->src, src))
545 break;
546
547 /* Closest match */
548 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
549 sk1 = sk;
550 }
551 }
552 return node ? sk : sk1;
553 }
554
555 /* Find socket with given address (psm, src).
556 * Returns locked socket */
557 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
558 {
559 struct sock *s;
560 read_lock(&l2cap_sk_list.lock);
561 s = __l2cap_get_sock_by_psm(state, psm, src);
562 if (s) bh_lock_sock(s);
563 read_unlock(&l2cap_sk_list.lock);
564 return s;
565 }
566
567 static void l2cap_sock_destruct(struct sock *sk)
568 {
569 BT_DBG("sk %p", sk);
570
571 skb_queue_purge(&sk->sk_receive_queue);
572 skb_queue_purge(&sk->sk_write_queue);
573 }
574
575 static void l2cap_sock_cleanup_listen(struct sock *parent)
576 {
577 struct sock *sk;
578
579 BT_DBG("parent %p", parent);
580
581 /* Close not yet accepted channels */
582 while ((sk = bt_accept_dequeue(parent, NULL)))
583 l2cap_sock_close(sk);
584
585 parent->sk_state = BT_CLOSED;
586 sock_set_flag(parent, SOCK_ZAPPED);
587 }
588
589 /* Kill socket (only if zapped and orphan)
590 * Must be called on unlocked socket.
591 */
592 static void l2cap_sock_kill(struct sock *sk)
593 {
594 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
595 return;
596
597 BT_DBG("sk %p state %d", sk, sk->sk_state);
598
599 /* Kill poor orphan */
600 bt_sock_unlink(&l2cap_sk_list, sk);
601 sock_set_flag(sk, SOCK_DEAD);
602 sock_put(sk);
603 }
604
605 static void __l2cap_sock_close(struct sock *sk, int reason)
606 {
607 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
608
609 switch (sk->sk_state) {
610 case BT_LISTEN:
611 l2cap_sock_cleanup_listen(sk);
612 break;
613
614 case BT_CONNECTED:
615 case BT_CONFIG:
616 case BT_CONNECT2:
617 if (sk->sk_type == SOCK_SEQPACKET) {
618 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
619 struct l2cap_disconn_req req;
620
621 sk->sk_state = BT_DISCONN;
622 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
623
624 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
625 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
626 l2cap_send_cmd(conn, l2cap_get_ident(conn),
627 L2CAP_DISCONN_REQ, sizeof(req), &req);
628 } else
629 l2cap_chan_del(sk, reason);
630 break;
631
632 case BT_CONNECT:
633 case BT_DISCONN:
634 l2cap_chan_del(sk, reason);
635 break;
636
637 default:
638 sock_set_flag(sk, SOCK_ZAPPED);
639 break;
640 }
641 }
642
643 /* Must be called on unlocked socket. */
644 static void l2cap_sock_close(struct sock *sk)
645 {
646 l2cap_sock_clear_timer(sk);
647 lock_sock(sk);
648 __l2cap_sock_close(sk, ECONNRESET);
649 release_sock(sk);
650 l2cap_sock_kill(sk);
651 }
652
653 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
654 {
655 struct l2cap_pinfo *pi = l2cap_pi(sk);
656
657 BT_DBG("sk %p", sk);
658
659 if (parent) {
660 sk->sk_type = parent->sk_type;
661 pi->imtu = l2cap_pi(parent)->imtu;
662 pi->omtu = l2cap_pi(parent)->omtu;
663 pi->link_mode = l2cap_pi(parent)->link_mode;
664 } else {
665 pi->imtu = L2CAP_DEFAULT_MTU;
666 pi->omtu = 0;
667 pi->link_mode = 0;
668 }
669
670 /* Default config options */
671 pi->conf_len = 0;
672 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
673 }
674
675 static struct proto l2cap_proto = {
676 .name = "L2CAP",
677 .owner = THIS_MODULE,
678 .obj_size = sizeof(struct l2cap_pinfo)
679 };
680
681 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
682 {
683 struct sock *sk;
684
685 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
686 if (!sk)
687 return NULL;
688
689 sock_init_data(sock, sk);
690 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
691
692 sk->sk_destruct = l2cap_sock_destruct;
693 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
694
695 sock_reset_flag(sk, SOCK_ZAPPED);
696
697 sk->sk_protocol = proto;
698 sk->sk_state = BT_OPEN;
699
700 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
701
702 bt_sock_link(&l2cap_sk_list, sk);
703 return sk;
704 }
705
706 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
707 {
708 struct sock *sk;
709
710 BT_DBG("sock %p", sock);
711
712 sock->state = SS_UNCONNECTED;
713
714 if (sock->type != SOCK_SEQPACKET &&
715 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
716 return -ESOCKTNOSUPPORT;
717
718 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
719 return -EPERM;
720
721 sock->ops = &l2cap_sock_ops;
722
723 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
724 if (!sk)
725 return -ENOMEM;
726
727 l2cap_sock_init(sk, NULL);
728 return 0;
729 }
730
731 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
732 {
733 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
734 struct sock *sk = sock->sk;
735 int err = 0;
736
737 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
738
739 if (!addr || addr->sa_family != AF_BLUETOOTH)
740 return -EINVAL;
741
742 lock_sock(sk);
743
744 if (sk->sk_state != BT_OPEN) {
745 err = -EBADFD;
746 goto done;
747 }
748
749 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
750 !capable(CAP_NET_BIND_SERVICE)) {
751 err = -EACCES;
752 goto done;
753 }
754
755 write_lock_bh(&l2cap_sk_list.lock);
756
757 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
758 err = -EADDRINUSE;
759 } else {
760 /* Save source address */
761 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
762 l2cap_pi(sk)->psm = la->l2_psm;
763 l2cap_pi(sk)->sport = la->l2_psm;
764 sk->sk_state = BT_BOUND;
765 }
766
767 write_unlock_bh(&l2cap_sk_list.lock);
768
769 done:
770 release_sock(sk);
771 return err;
772 }
773
774 static int l2cap_do_connect(struct sock *sk)
775 {
776 bdaddr_t *src = &bt_sk(sk)->src;
777 bdaddr_t *dst = &bt_sk(sk)->dst;
778 struct l2cap_conn *conn;
779 struct hci_conn *hcon;
780 struct hci_dev *hdev;
781 int err = 0;
782
783 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
784
785 if (!(hdev = hci_get_route(dst, src)))
786 return -EHOSTUNREACH;
787
788 hci_dev_lock_bh(hdev);
789
790 err = -ENOMEM;
791
792 hcon = hci_connect(hdev, ACL_LINK, dst);
793 if (!hcon)
794 goto done;
795
796 conn = l2cap_conn_add(hcon, 0);
797 if (!conn) {
798 hci_conn_put(hcon);
799 goto done;
800 }
801
802 err = 0;
803
804 /* Update source addr of the socket */
805 bacpy(src, conn->src);
806
807 l2cap_chan_add(conn, sk, NULL);
808
809 sk->sk_state = BT_CONNECT;
810 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
811
812 if (hcon->state == BT_CONNECTED) {
813 if (sk->sk_type != SOCK_SEQPACKET) {
814 l2cap_sock_clear_timer(sk);
815 sk->sk_state = BT_CONNECTED;
816 } else
817 l2cap_do_start(sk);
818 }
819
820 done:
821 hci_dev_unlock_bh(hdev);
822 hci_dev_put(hdev);
823 return err;
824 }
825
826 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
827 {
828 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
829 struct sock *sk = sock->sk;
830 int err = 0;
831
832 lock_sock(sk);
833
834 BT_DBG("sk %p", sk);
835
836 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
837 err = -EINVAL;
838 goto done;
839 }
840
841 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
842 err = -EINVAL;
843 goto done;
844 }
845
846 switch (sk->sk_state) {
847 case BT_CONNECT:
848 case BT_CONNECT2:
849 case BT_CONFIG:
850 /* Already connecting */
851 goto wait;
852
853 case BT_CONNECTED:
854 /* Already connected */
855 goto done;
856
857 case BT_OPEN:
858 case BT_BOUND:
859 /* Can connect */
860 break;
861
862 default:
863 err = -EBADFD;
864 goto done;
865 }
866
867 /* Set destination address and psm */
868 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
869 l2cap_pi(sk)->psm = la->l2_psm;
870
871 if ((err = l2cap_do_connect(sk)))
872 goto done;
873
874 wait:
875 err = bt_sock_wait_state(sk, BT_CONNECTED,
876 sock_sndtimeo(sk, flags & O_NONBLOCK));
877 done:
878 release_sock(sk);
879 return err;
880 }
881
882 static int l2cap_sock_listen(struct socket *sock, int backlog)
883 {
884 struct sock *sk = sock->sk;
885 int err = 0;
886
887 BT_DBG("sk %p backlog %d", sk, backlog);
888
889 lock_sock(sk);
890
891 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
892 err = -EBADFD;
893 goto done;
894 }
895
896 if (!l2cap_pi(sk)->psm) {
897 bdaddr_t *src = &bt_sk(sk)->src;
898 u16 psm;
899
900 err = -EINVAL;
901
902 write_lock_bh(&l2cap_sk_list.lock);
903
904 for (psm = 0x1001; psm < 0x1100; psm += 2)
905 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
906 l2cap_pi(sk)->psm = htobs(psm);
907 l2cap_pi(sk)->sport = htobs(psm);
908 err = 0;
909 break;
910 }
911
912 write_unlock_bh(&l2cap_sk_list.lock);
913
914 if (err < 0)
915 goto done;
916 }
917
918 sk->sk_max_ack_backlog = backlog;
919 sk->sk_ack_backlog = 0;
920 sk->sk_state = BT_LISTEN;
921
922 done:
923 release_sock(sk);
924 return err;
925 }
926
927 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
928 {
929 DECLARE_WAITQUEUE(wait, current);
930 struct sock *sk = sock->sk, *nsk;
931 long timeo;
932 int err = 0;
933
934 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
935
936 if (sk->sk_state != BT_LISTEN) {
937 err = -EBADFD;
938 goto done;
939 }
940
941 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
942
943 BT_DBG("sk %p timeo %ld", sk, timeo);
944
945 /* Wait for an incoming connection (wake-one). */
946 add_wait_queue_exclusive(sk->sk_sleep, &wait);
947 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
948 set_current_state(TASK_INTERRUPTIBLE);
949 if (!timeo) {
950 err = -EAGAIN;
951 break;
952 }
953
954 release_sock(sk);
955 timeo = schedule_timeout(timeo);
956 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
957
958 if (sk->sk_state != BT_LISTEN) {
959 err = -EBADFD;
960 break;
961 }
962
963 if (signal_pending(current)) {
964 err = sock_intr_errno(timeo);
965 break;
966 }
967 }
968 set_current_state(TASK_RUNNING);
969 remove_wait_queue(sk->sk_sleep, &wait);
970
971 if (err)
972 goto done;
973
974 newsock->state = SS_CONNECTED;
975
976 BT_DBG("new socket %p", nsk);
977
978 done:
979 release_sock(sk);
980 return err;
981 }
982
983 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
984 {
985 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
986 struct sock *sk = sock->sk;
987
988 BT_DBG("sock %p, sk %p", sock, sk);
989
990 addr->sa_family = AF_BLUETOOTH;
991 *len = sizeof(struct sockaddr_l2);
992
993 if (peer)
994 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
995 else
996 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
997
998 la->l2_psm = l2cap_pi(sk)->psm;
999 return 0;
1000 }
1001
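/*
 * Copy a user message into a first skb carrying the L2CAP header (plus the
 * PSM for connectionless sockets) and chain any remainder into continuation
 * fragments sized to the ACL MTU, then hand the result to hci_send_acl().
 */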
1002 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1003 {
1004 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1005 struct sk_buff *skb, **frag;
1006 int err, hlen, count, sent = 0;
1007 struct l2cap_hdr *lh;
1008
1009 BT_DBG("sk %p len %d", sk, len);
1010
1011 /* First fragment (with L2CAP header) */
1012 if (sk->sk_type == SOCK_DGRAM)
1013 hlen = L2CAP_HDR_SIZE + 2;
1014 else
1015 hlen = L2CAP_HDR_SIZE;
1016
1017 count = min_t(unsigned int, (conn->mtu - hlen), len);
1018
1019 skb = bt_skb_send_alloc(sk, hlen + count,
1020 msg->msg_flags & MSG_DONTWAIT, &err);
1021 if (!skb)
1022 return err;
1023
1024 /* Create L2CAP header */
1025 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1026 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1027 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1028
1029 if (sk->sk_type == SOCK_DGRAM)
1030 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1031
1032 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1033 err = -EFAULT;
1034 goto fail;
1035 }
1036
1037 sent += count;
1038 len -= count;
1039
1040 /* Continuation fragments (no L2CAP header) */
1041 frag = &skb_shinfo(skb)->frag_list;
1042 while (len) {
1043 count = min_t(unsigned int, conn->mtu, len);
1044
1045 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1046 if (!*frag)
1047 goto fail;
1048
1049 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1050 err = -EFAULT;
1051 goto fail;
1052 }
1053
1054 sent += count;
1055 len -= count;
1056
1057 frag = &(*frag)->next;
1058 }
1059
1060 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1061 goto fail;
1062
1063 return sent;
1064
1065 fail:
1066 kfree_skb(skb);
1067 return err;
1068 }
1069
1070 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1071 {
1072 struct sock *sk = sock->sk;
1073 int err = 0;
1074
1075 BT_DBG("sock %p, sk %p", sock, sk);
1076
1077 err = sock_error(sk);
1078 if (err)
1079 return err;
1080
1081 if (msg->msg_flags & MSG_OOB)
1082 return -EOPNOTSUPP;
1083
1084 /* Check outgoing MTU */
1085 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1086 return -EINVAL;
1087
1088 lock_sock(sk);
1089
1090 if (sk->sk_state == BT_CONNECTED)
1091 err = l2cap_do_send(sk, msg, len);
1092 else
1093 err = -ENOTCONN;
1094
1095 release_sock(sk);
1096 return err;
1097 }
1098
1099 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1100 {
1101 struct sock *sk = sock->sk;
1102 struct l2cap_options opts;
1103 int err = 0, len;
1104 u32 opt;
1105
1106 BT_DBG("sk %p", sk);
1107
1108 lock_sock(sk);
1109
1110 switch (optname) {
1111 case L2CAP_OPTIONS:
1112 opts.imtu = l2cap_pi(sk)->imtu;
1113 opts.omtu = l2cap_pi(sk)->omtu;
1114 opts.flush_to = l2cap_pi(sk)->flush_to;
1115 opts.mode = L2CAP_MODE_BASIC;
1116
1117 len = min_t(unsigned int, sizeof(opts), optlen);
1118 if (copy_from_user((char *) &opts, optval, len)) {
1119 err = -EFAULT;
1120 break;
1121 }
1122
1123 l2cap_pi(sk)->imtu = opts.imtu;
1124 l2cap_pi(sk)->omtu = opts.omtu;
1125 break;
1126
1127 case L2CAP_LM:
1128 if (get_user(opt, (u32 __user *) optval)) {
1129 err = -EFAULT;
1130 break;
1131 }
1132
1133 l2cap_pi(sk)->link_mode = opt;
1134 break;
1135
1136 default:
1137 err = -ENOPROTOOPT;
1138 break;
1139 }
1140
1141 release_sock(sk);
1142 return err;
1143 }
1144
1145 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1146 {
1147 struct sock *sk = sock->sk;
1148 struct l2cap_options opts;
1149 struct l2cap_conninfo cinfo;
1150 int len, err = 0;
1151
1152 BT_DBG("sk %p", sk);
1153
1154 if (get_user(len, optlen))
1155 return -EFAULT;
1156
1157 lock_sock(sk);
1158
1159 switch (optname) {
1160 case L2CAP_OPTIONS:
1161 opts.imtu = l2cap_pi(sk)->imtu;
1162 opts.omtu = l2cap_pi(sk)->omtu;
1163 opts.flush_to = l2cap_pi(sk)->flush_to;
1164 opts.mode = L2CAP_MODE_BASIC;
1165
1166 len = min_t(unsigned int, len, sizeof(opts));
1167 if (copy_to_user(optval, (char *) &opts, len))
1168 err = -EFAULT;
1169
1170 break;
1171
1172 case L2CAP_LM:
1173 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1174 err = -EFAULT;
1175 break;
1176
1177 case L2CAP_CONNINFO:
1178 if (sk->sk_state != BT_CONNECTED) {
1179 err = -ENOTCONN;
1180 break;
1181 }
1182
1183 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1184 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1185
1186 len = min_t(unsigned int, len, sizeof(cinfo));
1187 if (copy_to_user(optval, (char *) &cinfo, len))
1188 err = -EFAULT;
1189
1190 break;
1191
1192 default:
1193 err = -ENOPROTOOPT;
1194 break;
1195 }
1196
1197 release_sock(sk);
1198 return err;
1199 }
1200
1201 static int l2cap_sock_shutdown(struct socket *sock, int how)
1202 {
1203 struct sock *sk = sock->sk;
1204 int err = 0;
1205
1206 BT_DBG("sock %p, sk %p", sock, sk);
1207
1208 if (!sk)
1209 return 0;
1210
1211 lock_sock(sk);
1212 if (!sk->sk_shutdown) {
1213 sk->sk_shutdown = SHUTDOWN_MASK;
1214 l2cap_sock_clear_timer(sk);
1215 __l2cap_sock_close(sk, 0);
1216
1217 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1218 err = bt_sock_wait_state(sk, BT_CLOSED,
1219 sk->sk_lingertime);
1220 }
1221 release_sock(sk);
1222 return err;
1223 }
1224
1225 static int l2cap_sock_release(struct socket *sock)
1226 {
1227 struct sock *sk = sock->sk;
1228 int err;
1229
1230 BT_DBG("sock %p, sk %p", sock, sk);
1231
1232 if (!sk)
1233 return 0;
1234
1235 err = l2cap_sock_shutdown(sock, 2);
1236
1237 sock_orphan(sk);
1238 l2cap_sock_kill(sk);
1239 return err;
1240 }
1241
1242 static void l2cap_chan_ready(struct sock *sk)
1243 {
1244 struct sock *parent = bt_sk(sk)->parent;
1245
1246 BT_DBG("sk %p, parent %p", sk, parent);
1247
1248 l2cap_pi(sk)->conf_state = 0;
1249 l2cap_sock_clear_timer(sk);
1250
1251 if (!parent) {
1252 /* Outgoing channel.
1253 * Wake up socket sleeping on connect.
1254 */
1255 sk->sk_state = BT_CONNECTED;
1256 sk->sk_state_change(sk);
1257 } else {
1258 /* Incoming channel.
1259 * Wake up socket sleeping on accept.
1260 */
1261 parent->sk_data_ready(parent, 0);
1262 }
1263
1264 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1266 hci_conn_change_link_key(conn->hcon);
1267 }
1268 }
1269
1270 /* Copy frame to all raw sockets on that connection */
1271 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1272 {
1273 struct l2cap_chan_list *l = &conn->chan_list;
1274 struct sk_buff *nskb;
1275 struct sock *sk;
1276
1277 BT_DBG("conn %p", conn);
1278
1279 read_lock(&l->lock);
1280 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1281 if (sk->sk_type != SOCK_RAW)
1282 continue;
1283
1284 /* Don't send frame to the socket it came from */
1285 if (skb->sk == sk)
1286 continue;
1287
1288 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1289 continue;
1290
1291 if (sock_queue_rcv_skb(sk, nskb))
1292 kfree_skb(nskb);
1293 }
1294 read_unlock(&l->lock);
1295 }
1296
1297 /* ---- L2CAP signalling commands ---- */
1298 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1299 u8 code, u8 ident, u16 dlen, void *data)
1300 {
1301 struct sk_buff *skb, **frag;
1302 struct l2cap_cmd_hdr *cmd;
1303 struct l2cap_hdr *lh;
1304 int len, count;
1305
1306 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1307
1308 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1309 count = min_t(unsigned int, conn->mtu, len);
1310
1311 skb = bt_skb_alloc(count, GFP_ATOMIC);
1312 if (!skb)
1313 return NULL;
1314
1315 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1316 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1317 lh->cid = cpu_to_le16(0x0001);
1318
1319 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1320 cmd->code = code;
1321 cmd->ident = ident;
1322 cmd->len = cpu_to_le16(dlen);
1323
1324 if (dlen) {
1325 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1326 memcpy(skb_put(skb, count), data, count);
1327 data += count;
1328 }
1329
1330 len -= skb->len;
1331
1332 /* Continuation fragments (no L2CAP header) */
1333 frag = &skb_shinfo(skb)->frag_list;
1334 while (len) {
1335 count = min_t(unsigned int, conn->mtu, len);
1336
1337 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1338 if (!*frag)
1339 goto fail;
1340
1341 memcpy(skb_put(*frag, count), data, count);
1342
1343 len -= count;
1344 data += count;
1345
1346 frag = &(*frag)->next;
1347 }
1348
1349 return skb;
1350
1351 fail:
1352 kfree_skb(skb);
1353 return NULL;
1354 }
1355
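/*
 * Configuration options are a simple TLV stream: a one-byte type (top bit
 * set marks a hint that may be silently skipped if unknown), a one-byte
 * length and a value of that length.  The two helpers below walk and build
 * such a stream one option at a time.
 */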
1356 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1357 {
1358 struct l2cap_conf_opt *opt = *ptr;
1359 int len;
1360
1361 len = L2CAP_CONF_OPT_SIZE + opt->len;
1362 *ptr += len;
1363
1364 *type = opt->type;
1365 *olen = opt->len;
1366
1367 switch (opt->len) {
1368 case 1:
1369 *val = *((u8 *) opt->val);
1370 break;
1371
1372 case 2:
1373 *val = __le16_to_cpu(*((__le16 *) opt->val));
1374 break;
1375
1376 case 4:
1377 *val = __le32_to_cpu(*((__le32 *) opt->val));
1378 break;
1379
1380 default:
1381 *val = (unsigned long) opt->val;
1382 break;
1383 }
1384
1385 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1386 return len;
1387 }
1388
1389 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1390 {
1391 struct l2cap_conf_opt *opt = *ptr;
1392
1393 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1394
1395 opt->type = type;
1396 opt->len = len;
1397
1398 switch (len) {
1399 case 1:
1400 *((u8 *) opt->val) = val;
1401 break;
1402
1403 case 2:
1404 *((__le16 *) opt->val) = cpu_to_le16(val);
1405 break;
1406
1407 case 4:
1408 *((__le32 *) opt->val) = cpu_to_le32(val);
1409 break;
1410
1411 default:
1412 memcpy(opt->val, (void *) val, len);
1413 break;
1414 }
1415
1416 *ptr += L2CAP_CONF_OPT_SIZE + len;
1417 }
1418
1419 static int l2cap_build_conf_req(struct sock *sk, void *data)
1420 {
1421 struct l2cap_pinfo *pi = l2cap_pi(sk);
1422 struct l2cap_conf_req *req = data;
1423 void *ptr = req->data;
1424
1425 BT_DBG("sk %p", sk);
1426
1427 if (pi->imtu != L2CAP_DEFAULT_MTU)
1428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1429
1430 /* FIXME: Need actual value of the flush timeout */
1431 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1432 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1433
1434 req->dcid = cpu_to_le16(pi->dcid);
1435 req->flags = cpu_to_le16(0);
1436
1437 return ptr - data;
1438 }
1439
1440 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1441 {
1442 struct l2cap_pinfo *pi = l2cap_pi(sk);
1443 struct l2cap_conf_rsp *rsp = data;
1444 void *ptr = rsp->data;
1445 void *req = pi->conf_req;
1446 int len = pi->conf_len;
1447 int type, hint, olen;
1448 unsigned long val;
1449 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1450 u16 mtu = L2CAP_DEFAULT_MTU;
1451 u16 result = L2CAP_CONF_SUCCESS;
1452
1453 BT_DBG("sk %p", sk);
1454
1455 while (len >= L2CAP_CONF_OPT_SIZE) {
1456 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1457
1458 hint = type & 0x80;
1459 type &= 0x7f;
1460
1461 switch (type) {
1462 case L2CAP_CONF_MTU:
1463 mtu = val;
1464 break;
1465
1466 case L2CAP_CONF_FLUSH_TO:
1467 pi->flush_to = val;
1468 break;
1469
1470 case L2CAP_CONF_QOS:
1471 break;
1472
1473 case L2CAP_CONF_RFC:
1474 if (olen == sizeof(rfc))
1475 memcpy(&rfc, (void *) val, olen);
1476 break;
1477
1478 default:
1479 if (hint)
1480 break;
1481
1482 result = L2CAP_CONF_UNKNOWN;
1483 *((u8 *) ptr++) = type;
1484 break;
1485 }
1486 }
1487
1488 if (result == L2CAP_CONF_SUCCESS) {
1489 /* Configure output options and let the other side know
1490 * which ones we don't like. */
1491
1492 if (rfc.mode == L2CAP_MODE_BASIC) {
1493 if (mtu < pi->omtu)
1494 result = L2CAP_CONF_UNACCEPT;
1495 else {
1496 pi->omtu = mtu;
1497 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1498 }
1499
1500 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1501 } else {
1502 result = L2CAP_CONF_UNACCEPT;
1503
1504 memset(&rfc, 0, sizeof(rfc));
1505 rfc.mode = L2CAP_MODE_BASIC;
1506
1507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1508 sizeof(rfc), (unsigned long) &rfc);
1509 }
1510 }
1511
1512 rsp->scid = cpu_to_le16(pi->dcid);
1513 rsp->result = cpu_to_le16(result);
1514 rsp->flags = cpu_to_le16(0x0000);
1515
1516 return ptr - data;
1517 }
1518
1519 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1520 {
1521 struct l2cap_conf_rsp *rsp = data;
1522 void *ptr = rsp->data;
1523
1524 BT_DBG("sk %p", sk);
1525
1526 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1527 rsp->result = cpu_to_le16(result);
1528 rsp->flags = cpu_to_le16(flags);
1529
1530 return ptr - data;
1531 }
1532
1533 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1534 {
1535 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1536
1537 if (rej->reason != 0x0000)
1538 return 0;
1539
1540 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1541 cmd->ident == conn->info_ident) {
1542 conn->info_ident = 0;
1543 del_timer(&conn->info_timer);
1544 l2cap_conn_start(conn);
1545 }
1546
1547 return 0;
1548 }
1549
1550 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1551 {
1552 struct l2cap_chan_list *list = &conn->chan_list;
1553 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1554 struct l2cap_conn_rsp rsp;
1555 struct sock *sk, *parent;
1556 int result, status = 0;
1557
1558 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1559 __le16 psm = req->psm;
1560
1561 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1562
1563 /* Check if we have a socket listening on this psm */
1564 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1565 if (!parent) {
1566 result = L2CAP_CR_BAD_PSM;
1567 goto sendresp;
1568 }
1569
1570 result = L2CAP_CR_NO_MEM;
1571
1572 /* Check for backlog size */
1573 if (sk_acceptq_is_full(parent)) {
1574 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1575 goto response;
1576 }
1577
1578 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1579 if (!sk)
1580 goto response;
1581
1582 write_lock_bh(&list->lock);
1583
1584 /* Check if we already have a channel with that dcid */
1585 if (__l2cap_get_chan_by_dcid(list, scid)) {
1586 write_unlock_bh(&list->lock);
1587 sock_set_flag(sk, SOCK_ZAPPED);
1588 l2cap_sock_kill(sk);
1589 goto response;
1590 }
1591
1592 hci_conn_hold(conn->hcon);
1593
1594 l2cap_sock_init(sk, parent);
1595 bacpy(&bt_sk(sk)->src, conn->src);
1596 bacpy(&bt_sk(sk)->dst, conn->dst);
1597 l2cap_pi(sk)->psm = psm;
1598 l2cap_pi(sk)->dcid = scid;
1599
1600 __l2cap_chan_add(conn, sk, parent);
1601 dcid = l2cap_pi(sk)->scid;
1602
1603 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1604
1605 l2cap_pi(sk)->ident = cmd->ident;
1606
1607 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1608 if (l2cap_check_link_mode(sk)) {
1609 sk->sk_state = BT_CONFIG;
1610 result = L2CAP_CR_SUCCESS;
1611 status = L2CAP_CS_NO_INFO;
1612 } else {
1613 sk->sk_state = BT_CONNECT2;
1614 result = L2CAP_CR_PEND;
1615 status = L2CAP_CS_AUTHEN_PEND;
1616 }
1617 } else {
1618 sk->sk_state = BT_CONNECT2;
1619 result = L2CAP_CR_PEND;
1620 status = L2CAP_CS_NO_INFO;
1621 }
1622
1623 write_unlock_bh(&list->lock);
1624
1625 response:
1626 bh_unlock_sock(parent);
1627
1628 sendresp:
1629 rsp.scid = cpu_to_le16(scid);
1630 rsp.dcid = cpu_to_le16(dcid);
1631 rsp.result = cpu_to_le16(result);
1632 rsp.status = cpu_to_le16(status);
1633 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1634
1635 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1636 struct l2cap_info_req info;
1637 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1638
1639 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1640 conn->info_ident = l2cap_get_ident(conn);
1641
1642 mod_timer(&conn->info_timer, jiffies +
1643 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1644
1645 l2cap_send_cmd(conn, conn->info_ident,
1646 L2CAP_INFO_REQ, sizeof(info), &info);
1647 }
1648
1649 return 0;
1650 }
1651
1652 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1653 {
1654 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1655 u16 scid, dcid, result, status;
1656 struct sock *sk;
1657 u8 req[128];
1658
1659 scid = __le16_to_cpu(rsp->scid);
1660 dcid = __le16_to_cpu(rsp->dcid);
1661 result = __le16_to_cpu(rsp->result);
1662 status = __le16_to_cpu(rsp->status);
1663
1664 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1665
1666 if (scid) {
1667 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1668 return 0;
1669 } else {
1670 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1671 return 0;
1672 }
1673
1674 switch (result) {
1675 case L2CAP_CR_SUCCESS:
1676 sk->sk_state = BT_CONFIG;
1677 l2cap_pi(sk)->ident = 0;
1678 l2cap_pi(sk)->dcid = dcid;
1679 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1680
1681 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1682 l2cap_build_conf_req(sk, req), req);
1683 break;
1684
1685 case L2CAP_CR_PEND:
1686 break;
1687
1688 default:
1689 l2cap_chan_del(sk, ECONNREFUSED);
1690 break;
1691 }
1692
1693 bh_unlock_sock(sk);
1694 return 0;
1695 }
1696
1697 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1698 {
1699 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1700 u16 dcid, flags;
1701 u8 rsp[64];
1702 struct sock *sk;
1703 int len;
1704
1705 dcid = __le16_to_cpu(req->dcid);
1706 flags = __le16_to_cpu(req->flags);
1707
1708 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1709
1710 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1711 return -ENOENT;
1712
1713 if (sk->sk_state == BT_DISCONN)
1714 goto unlock;
1715
1716 /* Reject if config buffer is too small. */
1717 len = cmd_len - sizeof(*req);
1718 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1719 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1720 l2cap_build_conf_rsp(sk, rsp,
1721 L2CAP_CONF_REJECT, flags), rsp);
1722 goto unlock;
1723 }
1724
1725 /* Store config. */
1726 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1727 l2cap_pi(sk)->conf_len += len;
1728
1729 if (flags & 0x0001) {
1730 /* Incomplete config. Send empty response. */
1731 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1732 l2cap_build_conf_rsp(sk, rsp,
1733 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1734 goto unlock;
1735 }
1736
1737 /* Complete config. */
1738 len = l2cap_parse_conf_req(sk, rsp);
1739 if (len < 0)
1740 goto unlock;
1741
1742 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1743
1744 /* Reset config buffer. */
1745 l2cap_pi(sk)->conf_len = 0;
1746
1747 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1748 goto unlock;
1749
1750 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1751 sk->sk_state = BT_CONNECTED;
1752 l2cap_chan_ready(sk);
1753 goto unlock;
1754 }
1755
1756 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1757 u8 buf[64];
1758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1759 l2cap_build_conf_req(sk, buf), buf);
1760 }
1761
1762 unlock:
1763 bh_unlock_sock(sk);
1764 return 0;
1765 }
1766
1767 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1768 {
1769 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1770 u16 scid, flags, result;
1771 struct sock *sk;
1772
1773 scid = __le16_to_cpu(rsp->scid);
1774 flags = __le16_to_cpu(rsp->flags);
1775 result = __le16_to_cpu(rsp->result);
1776
1777 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1778
1779 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1780 return 0;
1781
1782 switch (result) {
1783 case L2CAP_CONF_SUCCESS:
1784 break;
1785
1786 case L2CAP_CONF_UNACCEPT:
1787 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1788 char req[128];
1789 /* It does not make sense to adjust L2CAP parameters
1790 * that are currently defined in the spec. We simply
1791 * resend the config request that we sent earlier. It is
1792 * stupid, but it helps qualification testing, which
1793 * expects at least some response from us. */
1794 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1795 l2cap_build_conf_req(sk, req), req);
1796 goto done;
1797 }
1798
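/* fall through */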
1799 default:
1800 sk->sk_state = BT_DISCONN;
1801 sk->sk_err = ECONNRESET;
1802 l2cap_sock_set_timer(sk, HZ * 5);
1803 {
1804 struct l2cap_disconn_req req;
1805 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1806 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1807 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1808 L2CAP_DISCONN_REQ, sizeof(req), &req);
1809 }
1810 goto done;
1811 }
1812
1813 if (flags & 0x01)
1814 goto done;
1815
1816 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1817
1818 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1819 sk->sk_state = BT_CONNECTED;
1820 l2cap_chan_ready(sk);
1821 }
1822
1823 done:
1824 bh_unlock_sock(sk);
1825 return 0;
1826 }
1827
1828 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1829 {
1830 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1831 struct l2cap_disconn_rsp rsp;
1832 u16 dcid, scid;
1833 struct sock *sk;
1834
1835 scid = __le16_to_cpu(req->scid);
1836 dcid = __le16_to_cpu(req->dcid);
1837
1838 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1839
1840 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1841 return 0;
1842
1843 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1844 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1845 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1846
1847 sk->sk_shutdown = SHUTDOWN_MASK;
1848
1849 l2cap_chan_del(sk, ECONNRESET);
1850 bh_unlock_sock(sk);
1851
1852 l2cap_sock_kill(sk);
1853 return 0;
1854 }
1855
1856 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1857 {
1858 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1859 u16 dcid, scid;
1860 struct sock *sk;
1861
1862 scid = __le16_to_cpu(rsp->scid);
1863 dcid = __le16_to_cpu(rsp->dcid);
1864
1865 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1866
1867 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1868 return 0;
1869
1870 l2cap_chan_del(sk, 0);
1871 bh_unlock_sock(sk);
1872
1873 l2cap_sock_kill(sk);
1874 return 0;
1875 }
1876
1877 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1878 {
1879 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1880 u16 type;
1881
1882 type = __le16_to_cpu(req->type);
1883
1884 BT_DBG("type 0x%4.4x", type);
1885
1886 if (type == L2CAP_IT_FEAT_MASK) {
1887 u8 buf[8];
1888 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1889 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1890 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1891 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1892 l2cap_send_cmd(conn, cmd->ident,
1893 L2CAP_INFO_RSP, sizeof(buf), buf);
1894 } else {
1895 struct l2cap_info_rsp rsp;
1896 rsp.type = cpu_to_le16(type);
1897 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1898 l2cap_send_cmd(conn, cmd->ident,
1899 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1900 }
1901
1902 return 0;
1903 }
1904
1905 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1906 {
1907 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1908 u16 type, result;
1909
1910 type = __le16_to_cpu(rsp->type);
1911 result = __le16_to_cpu(rsp->result);
1912
1913 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1914
1915 conn->info_ident = 0;
1916
1917 del_timer(&conn->info_timer);
1918
1919 if (type == L2CAP_IT_FEAT_MASK)
1920 conn->feat_mask = get_unaligned_le32(rsp->data);
1921
1922 l2cap_conn_start(conn);
1923
1924 return 0;
1925 }
1926
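/*
 * Signalling channel input: the whole frame is first mirrored to raw
 * sockets, then each embedded command is dispatched to its handler and
 * anything unknown or failing is answered with a Command Reject.
 */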
1927 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1928 {
1929 u8 *data = skb->data;
1930 int len = skb->len;
1931 struct l2cap_cmd_hdr cmd;
1932 int err = 0;
1933
1934 l2cap_raw_recv(conn, skb);
1935
1936 while (len >= L2CAP_CMD_HDR_SIZE) {
1937 u16 cmd_len;
1938 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1939 data += L2CAP_CMD_HDR_SIZE;
1940 len -= L2CAP_CMD_HDR_SIZE;
1941
1942 cmd_len = le16_to_cpu(cmd.len);
1943
1944 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1945
1946 if (cmd_len > len || !cmd.ident) {
1947 BT_DBG("corrupted command");
1948 break;
1949 }
1950
1951 switch (cmd.code) {
1952 case L2CAP_COMMAND_REJ:
1953 l2cap_command_rej(conn, &cmd, data);
1954 break;
1955
1956 case L2CAP_CONN_REQ:
1957 err = l2cap_connect_req(conn, &cmd, data);
1958 break;
1959
1960 case L2CAP_CONN_RSP:
1961 err = l2cap_connect_rsp(conn, &cmd, data);
1962 break;
1963
1964 case L2CAP_CONF_REQ:
1965 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1966 break;
1967
1968 case L2CAP_CONF_RSP:
1969 err = l2cap_config_rsp(conn, &cmd, data);
1970 break;
1971
1972 case L2CAP_DISCONN_REQ:
1973 err = l2cap_disconnect_req(conn, &cmd, data);
1974 break;
1975
1976 case L2CAP_DISCONN_RSP:
1977 err = l2cap_disconnect_rsp(conn, &cmd, data);
1978 break;
1979
1980 case L2CAP_ECHO_REQ:
1981 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1982 break;
1983
1984 case L2CAP_ECHO_RSP:
1985 break;
1986
1987 case L2CAP_INFO_REQ:
1988 err = l2cap_information_req(conn, &cmd, data);
1989 break;
1990
1991 case L2CAP_INFO_RSP:
1992 err = l2cap_information_rsp(conn, &cmd, data);
1993 break;
1994
1995 default:
1996 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
1997 err = -EINVAL;
1998 break;
1999 }
2000
2001 if (err) {
2002 struct l2cap_cmd_rej rej;
2003 BT_DBG("error %d", err);
2004
2005 /* FIXME: Map err to a valid reason */
2006 rej.reason = cpu_to_le16(0);
2007 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2008 }
2009
2010 data += cmd_len;
2011 len -= cmd_len;
2012 }
2013
2014 kfree_skb(skb);
2015 }
2016
2017 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2018 {
2019 struct sock *sk;
2020
2021 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2022 if (!sk) {
2023 BT_DBG("unknown cid 0x%4.4x", cid);
2024 goto drop;
2025 }
2026
2027 BT_DBG("sk %p, len %d", sk, skb->len);
2028
2029 if (sk->sk_state != BT_CONNECTED)
2030 goto drop;
2031
2032 if (l2cap_pi(sk)->imtu < skb->len)
2033 goto drop;
2034
2035 /* If the socket recv buffers overflow we drop data here,
2036 * which is *bad* because L2CAP has to be reliable.
2037 * But we don't have any other choice. L2CAP doesn't
2038 * provide a flow control mechanism. */
2039
2040 if (!sock_queue_rcv_skb(sk, skb))
2041 goto done;
2042
2043 drop:
2044 kfree_skb(skb);
2045
2046 done:
2047 if (sk)
2048 bh_unlock_sock(sk);
2049
2050 return 0;
2051 }
2052
2053 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2054 {
2055 struct sock *sk;
2056
2057 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2058 if (!sk)
2059 goto drop;
2060
2061 BT_DBG("sk %p, len %d", sk, skb->len);
2062
2063 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2064 goto drop;
2065
2066 if (l2cap_pi(sk)->imtu < skb->len)
2067 goto drop;
2068
2069 if (!sock_queue_rcv_skb(sk, skb))
2070 goto done;
2071
2072 drop:
2073 kfree_skb(skb);
2074
2075 done:
2076 if (sk) bh_unlock_sock(sk);
2077 return 0;
2078 }
2079
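/*
 * Demultiplex a complete L2CAP frame by destination CID: 0x0001 is the
 * signalling channel, 0x0002 carries connectionless data (prefixed with
 * the PSM), anything else is looked up as a connection-oriented channel.
 */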
2080 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2081 {
2082 struct l2cap_hdr *lh = (void *) skb->data;
2083 u16 cid, len;
2084 __le16 psm;
2085
2086 skb_pull(skb, L2CAP_HDR_SIZE);
2087 cid = __le16_to_cpu(lh->cid);
2088 len = __le16_to_cpu(lh->len);
2089
2090 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2091
2092 switch (cid) {
2093 case 0x0001:
2094 l2cap_sig_channel(conn, skb);
2095 break;
2096
2097 case 0x0002:
2098 psm = get_unaligned((__le16 *) skb->data);
2099 skb_pull(skb, 2);
2100 l2cap_conless_channel(conn, psm, skb);
2101 break;
2102
2103 default:
2104 l2cap_data_channel(conn, cid, skb);
2105 break;
2106 }
2107 }
2108
2109 /* ---- L2CAP interface with lower layer (HCI) ---- */
2110
2111 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2112 {
2113 int exact = 0, lm1 = 0, lm2 = 0;
2114 register struct sock *sk;
2115 struct hlist_node *node;
2116
2117 if (type != ACL_LINK)
2118 return 0;
2119
2120 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2121
2122 /* Find listening sockets and check their link_mode */
2123 read_lock(&l2cap_sk_list.lock);
2124 sk_for_each(sk, node, &l2cap_sk_list.head) {
2125 if (sk->sk_state != BT_LISTEN)
2126 continue;
2127
2128 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2129 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2130 exact++;
2131 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2132 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2133 }
2134 read_unlock(&l2cap_sk_list.lock);
2135
2136 return exact ? lm1 : lm2;
2137 }
2138
2139 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2140 {
2141 struct l2cap_conn *conn;
2142
2143 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2144
2145 if (hcon->type != ACL_LINK)
2146 return 0;
2147
2148 if (!status) {
2149 conn = l2cap_conn_add(hcon, status);
2150 if (conn)
2151 l2cap_conn_ready(conn);
2152 } else
2153 l2cap_conn_del(hcon, bt_err(status));
2154
2155 return 0;
2156 }
2157
2158 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2159 {
2160 BT_DBG("hcon %p reason %d", hcon, reason);
2161
2162 if (hcon->type != ACL_LINK)
2163 return 0;
2164
2165 l2cap_conn_del(hcon, bt_err(reason));
2166
2167 return 0;
2168 }
2169
2170 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2171 {
2172 struct l2cap_chan_list *l;
2173 struct l2cap_conn *conn = hcon->l2cap_data;
2174 struct sock *sk;
2175
2176 if (!conn)
2177 return 0;
2178
2179 l = &conn->chan_list;
2180
2181 BT_DBG("conn %p", conn);
2182
2183 read_lock(&l->lock);
2184
2185 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2186 struct l2cap_pinfo *pi = l2cap_pi(sk);
2187
2188 bh_lock_sock(sk);
2189
2190 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2191 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2192 !status) {
2193 bh_unlock_sock(sk);
2194 continue;
2195 }
2196
2197 if (sk->sk_state == BT_CONNECT) {
2198 if (!status) {
2199 struct l2cap_conn_req req;
2200 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2201 req.psm = l2cap_pi(sk)->psm;
2202
2203 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2204
2205 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2206 L2CAP_CONN_REQ, sizeof(req), &req);
2207 } else {
2208 l2cap_sock_clear_timer(sk);
2209 l2cap_sock_set_timer(sk, HZ / 10);
2210 }
2211 } else if (sk->sk_state == BT_CONNECT2) {
2212 struct l2cap_conn_rsp rsp;
2213 __u16 result;
2214
2215 if (!status) {
2216 sk->sk_state = BT_CONFIG;
2217 result = L2CAP_CR_SUCCESS;
2218 } else {
2219 sk->sk_state = BT_DISCONN;
2220 l2cap_sock_set_timer(sk, HZ / 10);
2221 result = L2CAP_CR_SEC_BLOCK;
2222 }
2223
2224 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2225 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2226 rsp.result = cpu_to_le16(result);
2227 rsp.status = cpu_to_le16(0);
2228 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2229 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2230 }
2231
2232 bh_unlock_sock(sk);
2233 }
2234
2235 read_unlock(&l->lock);
2236
2237 return 0;
2238 }
2239
2240 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2241 {
2242 struct l2cap_chan_list *l;
2243 struct l2cap_conn *conn = hcon->l2cap_data;
2244 struct sock *sk;
2245
2246 if (!conn)
2247 return 0;
2248
2249 l = &conn->chan_list;
2250
2251 BT_DBG("conn %p", conn);
2252
2253 read_lock(&l->lock);
2254
2255 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2256 struct l2cap_pinfo *pi = l2cap_pi(sk);
2257
2258 bh_lock_sock(sk);
2259
2260 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2261 (sk->sk_state == BT_CONNECTED ||
2262 sk->sk_state == BT_CONFIG) &&
2263 !status && encrypt == 0x00) {
2264 __l2cap_sock_close(sk, ECONNREFUSED);
2265 bh_unlock_sock(sk);
2266 continue;
2267 }
2268
2269 if (sk->sk_state == BT_CONNECT) {
2270 if (!status) {
2271 struct l2cap_conn_req req;
2272 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2273 req.psm = l2cap_pi(sk)->psm;
2274
2275 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2276
2277 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2278 L2CAP_CONN_REQ, sizeof(req), &req);
2279 } else {
2280 l2cap_sock_clear_timer(sk);
2281 l2cap_sock_set_timer(sk, HZ / 10);
2282 }
2283 } else if (sk->sk_state == BT_CONNECT2) {
2284 struct l2cap_conn_rsp rsp;
2285 __u16 result;
2286
2287 if (!status) {
2288 sk->sk_state = BT_CONFIG;
2289 result = L2CAP_CR_SUCCESS;
2290 } else {
2291 sk->sk_state = BT_DISCONN;
2292 l2cap_sock_set_timer(sk, HZ / 10);
2293 result = L2CAP_CR_SEC_BLOCK;
2294 }
2295
2296 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2297 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2298 rsp.result = cpu_to_le16(result);
2299 rsp.status = cpu_to_le16(0);
2300 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2301 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2302 }
2303
2304 bh_unlock_sock(sk);
2305 }
2306
2307 read_unlock(&l->lock);
2308
2309 return 0;
2310 }
2311
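/*
 * ACL data from the HCI core.  The first fragment (ACL_START) carries the
 * L2CAP header, whose length field gives the size of the complete frame
 * (__le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE).  A frame that arrives whole
 * is dispatched to l2cap_recv_frame() immediately; otherwise an rx_skb of
 * the full size is allocated and continuation fragments are appended until
 * rx_len drops to zero.
 *
 * A worked example, assuming a 668-byte L2CAP payload split over two ACL
 * packets: hdr->len = 668, so len = 668 + 4 = 672.  If the start fragment
 * carries 339 bytes, rx_len becomes 672 - 339 = 333; a 333-byte
 * continuation then brings rx_len to 0 and the reassembled frame is
 * delivered.
 */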
2312 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2313 {
2314 struct l2cap_conn *conn = hcon->l2cap_data;
2315
2316 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2317 goto drop;
2318
2319 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2320
2321 if (flags & ACL_START) {
2322 struct l2cap_hdr *hdr;
2323 int len;
2324
2325 if (conn->rx_len) {
2326 BT_ERR("Unexpected start frame (len %d)", skb->len);
2327 kfree_skb(conn->rx_skb);
2328 conn->rx_skb = NULL;
2329 conn->rx_len = 0;
2330 l2cap_conn_unreliable(conn, ECOMM);
2331 }
2332
2333 if (skb->len < 2) {
2334 BT_ERR("Frame is too short (len %d)", skb->len);
2335 l2cap_conn_unreliable(conn, ECOMM);
2336 goto drop;
2337 }
2338
2339 hdr = (struct l2cap_hdr *) skb->data;
2340 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2341
2342 if (len == skb->len) {
2343 /* Complete frame received */
2344 l2cap_recv_frame(conn, skb);
2345 return 0;
2346 }
2347
2348 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2349
2350 if (skb->len > len) {
2351 BT_ERR("Frame is too long (len %d, expected len %d)",
2352 skb->len, len);
2353 l2cap_conn_unreliable(conn, ECOMM);
2354 goto drop;
2355 }
2356
2357 /* Allocate skb for the complete frame (with header) */
2358 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2359 goto drop;
2360
2361 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2362 skb->len);
2363 conn->rx_len = len - skb->len;
2364 } else {
2365 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2366
2367 if (!conn->rx_len) {
2368 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2369 l2cap_conn_unreliable(conn, ECOMM);
2370 goto drop;
2371 }
2372
2373 if (skb->len > conn->rx_len) {
2374 BT_ERR("Fragment is too long (len %d, expected %d)",
2375 skb->len, conn->rx_len);
2376 kfree_skb(conn->rx_skb);
2377 conn->rx_skb = NULL;
2378 conn->rx_len = 0;
2379 l2cap_conn_unreliable(conn, ECOMM);
2380 goto drop;
2381 }
2382
2383 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2384 skb->len);
2385 conn->rx_len -= skb->len;
2386
2387 if (!conn->rx_len) {
2388 /* Complete frame received */
2389 l2cap_recv_frame(conn, conn->rx_skb);
2390 conn->rx_skb = NULL;
2391 }
2392 }
2393
2394 drop:
2395 kfree_skb(skb);
2396 return 0;
2397 }
2398
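/*
 * Read-only "l2cap" attribute under the Bluetooth class in sysfs: one line
 * per L2CAP socket, in the order
 *
 *	src dst state psm scid dcid imtu omtu link_mode
 *
 * matching the sprintf() format below.  Note that sysfs hands the show
 * method a single page, so the complete listing is assumed to fit within
 * PAGE_SIZE bytes.
 */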
2399 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2400 {
2401 struct sock *sk;
2402 struct hlist_node *node;
2403 char *str = buf;
2404
2405 read_lock_bh(&l2cap_sk_list.lock);
2406
2407 sk_for_each(sk, node, &l2cap_sk_list.head) {
2408 struct l2cap_pinfo *pi = l2cap_pi(sk);
2409
2410 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2411 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2412 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2413 pi->imtu, pi->omtu, pi->link_mode);
2414 }
2415
2416 read_unlock_bh(&l2cap_sk_list.lock);
2417
2418 return (str - buf);
2419 }
2420
2421 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2422
2423 static const struct proto_ops l2cap_sock_ops = {
2424 .family = PF_BLUETOOTH,
2425 .owner = THIS_MODULE,
2426 .release = l2cap_sock_release,
2427 .bind = l2cap_sock_bind,
2428 .connect = l2cap_sock_connect,
2429 .listen = l2cap_sock_listen,
2430 .accept = l2cap_sock_accept,
2431 .getname = l2cap_sock_getname,
2432 .sendmsg = l2cap_sock_sendmsg,
2433 .recvmsg = bt_sock_recvmsg,
2434 .poll = bt_sock_poll,
2435 .ioctl = bt_sock_ioctl,
2436 .mmap = sock_no_mmap,
2437 .socketpair = sock_no_socketpair,
2438 .shutdown = l2cap_sock_shutdown,
2439 .setsockopt = l2cap_sock_setsockopt,
2440 .getsockopt = l2cap_sock_getsockopt
2441 };
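/*
 * The proto_ops table above is what user space reaches through the BSD
 * socket API.  A minimal client sketch, assuming the BlueZ user-space
 * headers <bluetooth/bluetooth.h> and <bluetooth/l2cap.h>; the PSM and the
 * remote address are placeholders:
 *
 *	struct sockaddr_l2 addr = { 0 };
 *	int lm = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
 *	int fd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *
 *	setsockopt(fd, SOL_L2CAP, L2CAP_LM, &lm, sizeof(lm));
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 * Requesting L2CAP_LM_AUTH/L2CAP_LM_ENCRYPT before connect() is what makes
 * the l2cap_auth_cfm()/l2cap_encrypt_cfm() paths above come into play.
 */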
2442
2443 static struct net_proto_family l2cap_sock_family_ops = {
2444 .family = PF_BLUETOOTH,
2445 .owner = THIS_MODULE,
2446 .create = l2cap_sock_create,
2447 };
2448
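/*
 * Hooks registered with the HCI core: incoming connection queries, connect
 * and disconnect events, authentication and encryption confirmations, and
 * ACL data delivery are all routed to the handlers defined above.
 */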
2449 static struct hci_proto l2cap_hci_proto = {
2450 .name = "L2CAP",
2451 .id = HCI_PROTO_L2CAP,
2452 .connect_ind = l2cap_connect_ind,
2453 .connect_cfm = l2cap_connect_cfm,
2454 .disconn_ind = l2cap_disconn_ind,
2455 .auth_cfm = l2cap_auth_cfm,
2456 .encrypt_cfm = l2cap_encrypt_cfm,
2457 .recv_acldata = l2cap_recv_acldata
2458 };
2459
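/*
 * Module init: register the protocol with the socket layer, hook the
 * PF_BLUETOOTH/BTPROTO_L2CAP family into the Bluetooth socket core, and
 * register with the HCI core.  Failures unwind in reverse order; the
 * sysfs attribute is optional and only logs an error if it cannot be
 * created.
 */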
2460 static int __init l2cap_init(void)
2461 {
2462 int err;
2463
2464 err = proto_register(&l2cap_proto, 0);
2465 if (err < 0)
2466 return err;
2467
2468 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2469 if (err < 0) {
2470 BT_ERR("L2CAP socket registration failed");
2471 goto error;
2472 }
2473
2474 err = hci_register_proto(&l2cap_hci_proto);
2475 if (err < 0) {
2476 BT_ERR("L2CAP protocol registration failed");
2477 bt_sock_unregister(BTPROTO_L2CAP);
2478 goto error;
2479 }
2480
2481 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2482 BT_ERR("Failed to create L2CAP info file");
2483
2484 BT_INFO("L2CAP ver %s", VERSION);
2485 BT_INFO("L2CAP socket layer initialized");
2486
2487 return 0;
2488
2489 error:
2490 proto_unregister(&l2cap_proto);
2491 return err;
2492 }
2493
2494 static void __exit l2cap_exit(void)
2495 {
2496 class_remove_file(bt_class, &class_attr_l2cap);
2497
2498 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2499 BT_ERR("L2CAP socket unregistration failed");
2500
2501 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2502 BT_ERR("L2CAP protocol unregistration failed");
2503
2504 proto_unregister(&l2cap_proto);
2505 }
2506
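/*
 * A dependent module only needs to reference this exported symbol to make
 * the module loader pull in l2cap.ko first.  A minimal sketch, assuming a
 * hypothetical dependent module's init routine:
 *
 *	static int __init example_proto_init(void)
 *	{
 *		l2cap_load();
 *		return 0;
 *	}
 *
 * In-tree users such as RFCOMM follow this pattern from their own init
 * functions.
 */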
2507 void l2cap_load(void)
2508 {
2509 /* Dummy function to trigger automatic L2CAP module loading by
2510 * other modules that use L2CAP sockets but don't use any other
2511 * symbols from it. */
2512 return;
2513 }
2514 EXPORT_SYMBOL(l2cap_load);
2515
2516 module_init(l2cap_init);
2517 module_exit(l2cap_exit);
2518
2519 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2520 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2521 MODULE_VERSION(VERSION);
2522 MODULE_LICENSE("GPL");
2523 MODULE_ALIAS("bt-proto-0");