]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/l2cap.c
[NET] NETNS: Omit sock->sk_net without CONFIG_NET_NS.
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
52
/* Compile out BT_DBG entirely unless L2CAP debugging was configured in. */
#ifndef CONFIG_BT_L2CAP_DEBUG
#undef BT_DBG
#define BT_DBG(D...)
#endif

#define VERSION "2.9"

/* Local L2CAP extended feature mask advertised in information responses. */
static u32 l2cap_feat_mask = 0x0000;

static const struct proto_ops l2cap_sock_ops;

/* Global list of all L2CAP sockets, protected by its own rwlock. */
static struct bt_sock_list l2cap_sk_list = {
	.lock = RW_LOCK_UNLOCKED
};

static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
/* Socket timer expiry callback; runs in timer (softirq) context.
 * Closes the channel with ETIMEDOUT. The final sock_put() pairs with the
 * reference taken by sk_reset_timer() when the timer was armed. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);
	__l2cap_sock_close(sk, ETIMEDOUT);
	bh_unlock_sock(sk);

	/* l2cap_sock_kill() must run on the unlocked socket. */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
89
/* Arm (or re-arm) the socket timer for @timeout jiffies from now.
 * sk_reset_timer() takes a socket reference that the expiry handler drops. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}

/* Cancel a pending socket timer; drops the timer's reference if it was set. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
101
102 /* ---- L2CAP channels ---- */
103 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
104 {
105 struct sock *s;
106 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
107 if (l2cap_pi(s)->dcid == cid)
108 break;
109 }
110 return s;
111 }
112
113 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
114 {
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->scid == cid)
118 break;
119 }
120 return s;
121 }
122
123 /* Find channel with given SCID.
124 * Returns locked socket */
125 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 {
127 struct sock *s;
128 read_lock(&l->lock);
129 s = __l2cap_get_chan_by_scid(l, cid);
130 if (s) bh_lock_sock(s);
131 read_unlock(&l->lock);
132 return s;
133 }
134
135 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
136 {
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->ident == ident)
140 break;
141 }
142 return s;
143 }
144
145 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
146 {
147 struct sock *s;
148 read_lock(&l->lock);
149 s = __l2cap_get_chan_by_ident(l, ident);
150 if (s) bh_lock_sock(s);
151 read_unlock(&l->lock);
152 return s;
153 }
154
155 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
156 {
157 u16 cid = 0x0040;
158
159 for (; cid < 0xffff; cid++) {
160 if(!__l2cap_get_chan_by_scid(l, cid))
161 return cid;
162 }
163
164 return 0;
165 }
166
/* Insert @sk at the head of the connection's channel list.
 * Caller must hold the list write lock; the list holds a socket reference
 * until l2cap_chan_unlink() drops it. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
178
/* Remove @sk from the connection's channel list and drop the list's
 * reference on it.
 * NOTE(review): next_c/prev_c are sampled before the write lock is taken;
 * this presumably relies on the caller serializing against concurrent list
 * changes for this socket — confirm against the callers. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
195
/* Attach @sk to @conn as a new channel and assign its CIDs by socket type:
 * SEQPACKET gets a dynamically allocated SCID, DGRAM uses the fixed
 * connectionless CID 0x0002, and raw sockets use the signalling CID 0x0001.
 * Caller must hold the channel list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = 0x0002;
		l2cap_pi(sk)->dcid = 0x0002;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = 0x0001;
		l2cap_pi(sk)->dcid = 0x0001;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	/* Incoming channel: queue on the listening parent for accept(). */
	if (parent)
		bt_accept_enqueue(parent, sk);
}
224
/* Delete channel.
 * Must be called on the locked socket.
 * Detaches the channel from its connection (dropping the hci_conn
 * reference), marks the socket closed/zapped, records @err on it, and
 * wakes whoever is waiting: the accepting parent or the socket itself. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: take it off the parent's accept queue. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
255
256 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
257 {
258 u8 id;
259
260 /* Get next available identificator.
261 * 1 - 128 are used by kernel.
262 * 129 - 199 are reserved.
263 * 200 - 254 are used by utilities like l2ping, etc.
264 */
265
266 spin_lock_bh(&conn->lock);
267
268 if (++conn->tx_ident > 128)
269 conn->tx_ident = 1;
270
271 id = conn->tx_ident;
272
273 spin_unlock_bh(&conn->lock);
274
275 return id;
276 }
277
/* Build a signalling command skb and queue it on the ACL link.
 * Returns -ENOMEM if the skb could not be allocated, otherwise the
 * hci_send_acl() result. */
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return -ENOMEM;

	return hci_send_acl(conn->hcon, skb, 0);
}
289
/* ---- L2CAP connections ---- */
/* The ACL link is usable (or the info exchange finished): walk every
 * channel on the connection. Non-SEQPACKET sockets become connected
 * immediately; SEQPACKET sockets still in BT_CONNECT get their L2CAP
 * Connection Request sent now. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET) {
			/* Raw/DGRAM sockets need no channel setup. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
321
/* ACL link came up. If anyone may use this connection, start the
 * information exchange: send an Information Request for the remote
 * feature mask and arm the info timer so a silent peer cannot stall
 * pending connects forever (see l2cap_info_timeout). */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
		struct l2cap_info_req req;

		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer,
			jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
				L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
341
/* Notify sockets that we cannot guarantee reliability anymore.
 * Flags @err on every channel that asked for L2CAP_LM_RELIABLE; the
 * error is reported to the owner on its next socket operation. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
359
360 static void l2cap_info_timeout(unsigned long arg)
361 {
362 struct l2cap_conn *conn = (void *) arg;
363
364 conn->info_ident = 0;
365
366 l2cap_conn_start(conn);
367 }
368
/* Create and attach the L2CAP connection state for an ACL link.
 * Returns the existing conn if one is already attached, NULL when the
 * link setup failed (@status != 0) or allocation fails. Runs in atomic
 * context, hence GFP_ATOMIC. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long)conn);

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	return conn;
}
398
/* Tear down the L2CAP connection state when the ACL link goes away.
 * Every channel is deleted with @err, any partially reassembled frame is
 * freed, and the info timer is cancelled before the conn itself is freed. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	if (conn->rx_skb)
		kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		/* Kill must run on the unlocked socket. */
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
426
427 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
428 {
429 struct l2cap_chan_list *l = &conn->chan_list;
430 write_lock_bh(&l->lock);
431 __l2cap_chan_add(conn, sk, parent);
432 write_unlock_bh(&l->lock);
433 }
434
435 /* ---- Socket interface ---- */
436 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
437 {
438 struct sock *sk;
439 struct hlist_node *node;
440 sk_for_each(sk, node, &l2cap_sk_list.head)
441 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
442 goto found;
443 sk = NULL;
444 found:
445 return sk;
446 }
447
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 * Caller must hold l2cap_sk_list.lock. An exact bdaddr match wins;
 * otherwise a BDADDR_ANY wildcard bind (sk1) is used. Note the final
 * `node ? sk : sk1` relies on sk_for_each leaving node non-NULL only
 * when the loop exited via break (exact match found). */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	return node ? sk : sk1;
}
472
473 /* Find socket with given address (psm, src).
474 * Returns locked socket */
475 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
476 {
477 struct sock *s;
478 read_lock(&l2cap_sk_list.lock);
479 s = __l2cap_get_sock_by_psm(state, psm, src);
480 if (s) bh_lock_sock(s);
481 read_unlock(&l2cap_sk_list.lock);
482 return s;
483 }
484
/* Socket destructor: release any skbs still sitting on the receive and
 * write queues when the last reference to the socket is dropped. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
492
/* Tear down a listening socket: close every not-yet-accepted child on the
 * accept queue, then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
506
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * No-op unless the socket has been zapped AND detached from its struct
 * socket; otherwise unlinks it from the global list and drops the list's
 * reference, which typically frees the socket. */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
522
/* Core close state machine. Must be called with the socket locked.
 * Connected SEQPACKET channels go through a graceful L2CAP disconnect
 * (BT_DISCONN + timer); everything else is torn down immediately. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_disconn_req req;

			/* Ask the peer to disconnect; the re-armed timer
			 * bounds how long we wait for its response. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		} else {
			l2cap_chan_del(sk, reason);
		}
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
561
/* Must be called on unlocked socket.
 * Cancels the timer, runs the close state machine with ECONNRESET, then
 * kills the socket (which only takes effect once it is zapped+orphan). */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
571
/* Initialize per-socket L2CAP state. A child accepted from @parent
 * inherits the parent's type, MTUs and link mode; a fresh socket gets
 * the defaults. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->link_mode = l2cap_pi(parent)->link_mode;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		pi->link_mode = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
}
593
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for the
 * l2cap_pinfo that l2cap_pi() accesses. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
599
/* Allocate and initialize a new L2CAP socket, link it into the global
 * socket list, and arm its timer callback. Returns NULL on allocation
 * failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* sk_sndtimeo doubles as the channel setup/teardown timeout. */
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long)sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
624
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP. Only SEQPACKET,
 * DGRAM and RAW types are supported, and RAW requires CAP_NET_RAW. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
649
650 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
651 {
652 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
653 struct sock *sk = sock->sk;
654 int err = 0;
655
656 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
657
658 if (!addr || addr->sa_family != AF_BLUETOOTH)
659 return -EINVAL;
660
661 lock_sock(sk);
662
663 if (sk->sk_state != BT_OPEN) {
664 err = -EBADFD;
665 goto done;
666 }
667
668 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
669 !capable(CAP_NET_BIND_SERVICE)) {
670 err = -EACCES;
671 goto done;
672 }
673
674 write_lock_bh(&l2cap_sk_list.lock);
675
676 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
677 err = -EADDRINUSE;
678 } else {
679 /* Save source address */
680 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
681 l2cap_pi(sk)->psm = la->l2_psm;
682 l2cap_pi(sk)->sport = la->l2_psm;
683 sk->sk_state = BT_BOUND;
684 }
685
686 write_unlock_bh(&l2cap_sk_list.lock);
687
688 done:
689 release_sock(sk);
690 return err;
691 }
692
/* Resolve a route to the destination, bring up (or reuse) the ACL link,
 * attach this socket as a channel and, if the link is already connected,
 * either start the info exchange or send the Connection Request directly.
 * Called with the socket locked; returns 0 or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);

	if (!(hdev = hci_get_route(dst, src)))
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	hcon = hci_connect(hdev, ACL_LINK, dst);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		/* Drop the reference hci_connect() gave us. */
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
			/* Feature mask not fetched yet: the Connection
			 * Request is deferred until the info exchange ends. */
			l2cap_conn_ready(conn);
			goto done;
		}

		if (sk->sk_type == SOCK_SEQPACKET) {
			struct l2cap_conn_req req;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		} else {
			/* DGRAM/RAW need no channel setup. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		}
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
755
/* connect(2) backend: validate the address, record destination bdaddr and
 * PSM, kick off the connection, and (unless non-blocking) wait for
 * BT_CONNECTED. Returns 0 or a negative errno. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
	struct sock *sk = sock->sk;
	int err = 0;

	lock_sock(sk);

	BT_DBG("sk %p", sk);

	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels must target a PSM. */
	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch(sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
	l2cap_pi(sk)->psm = la->l2_psm;

	if ((err = l2cap_do_connect(sk)))
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
811
/* listen(2) backend. Only bound SEQPACKET sockets may listen; if no PSM
 * was bound, auto-assign the first free odd PSM in the dynamic range
 * 0x1001-0x10ff. Returns 0 or a negative errno. */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
		err = -EBADFD;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid PSMs are odd, hence the += 2 stride. */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
				l2cap_pi(sk)->psm   = htobs(psm);
				l2cap_pi(sk)->sport = htobs(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
856
/* accept(2) backend: sleep (wake-one) until a child channel appears on
 * the accept queue, honoring the receive timeout, signals, and the
 * listening socket leaving BT_LISTEN. Returns 0 or a negative errno. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested lock class: the child socket will be locked under us. */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		/* Drop the lock while sleeping so the softirq path can
		 * queue new children. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
912
913 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
914 {
915 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
916 struct sock *sk = sock->sk;
917
918 BT_DBG("sock %p, sk %p", sock, sk);
919
920 addr->sa_family = AF_BLUETOOTH;
921 *len = sizeof(struct sockaddr_l2);
922
923 if (peer)
924 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
925 else
926 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
927
928 la->l2_psm = l2cap_pi(sk)->psm;
929 return 0;
930 }
931
/* Build one outgoing L2CAP frame from @msg and hand it to the ACL layer.
 * The first skb carries the L2CAP header (plus, for connectionless DGRAM
 * sockets, the 2-byte PSM); data beyond the ACL MTU is chained as
 * header-less fragments on frag_list. Returns bytes queued or -errno.
 * Called with the socket locked. */
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb, **frag;
	int err, hlen, count, sent=0;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, len);

	/* First fragment (with L2CAP header) */
	if (sk->sk_type == SOCK_DGRAM)
		hlen = L2CAP_HDR_SIZE + 2;	/* + PSM for connectionless */
	else
		hlen = L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = bt_skb_send_alloc(sk, hlen + count,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sk->sk_type == SOCK_DGRAM)
		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
		err = -EFAULT;
		goto fail;
	}

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			goto fail;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
			err = -EFAULT;
			goto fail;
		}

		sent += count;
		len  -= count;

		frag = &(*frag)->next;
	}

	/* kfree_skb() on failure frees the whole chain via frag_list. */
	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
		goto fail;

	return sent;

fail:
	kfree_skb(skb);
	return err;
}
999
/* sendmsg(2) backend: reject OOB data and frames larger than the outgoing
 * MTU (raw sockets excepted), then hand the payload to l2cap_do_send()
 * if the channel is connected. */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Check outgoing MTU */
	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED)
		err = l2cap_do_send(sk, msg, len);
	else
		err = -ENOTCONN;

	release_sock(sk);
	return err;
}
1028
/* setsockopt(2) backend for SOL_L2CAP.
 * L2CAP_OPTIONS updates imtu/omtu (opts is pre-filled with the current
 * values so a short user buffer leaves the rest unchanged); L2CAP_LM
 * replaces the link mode bits. */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int err = 0, len;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = L2CAP_MODE_BASIC;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* flush_to and mode are accepted but not applied here. */
		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->link_mode = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1074
1075 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1076 {
1077 struct sock *sk = sock->sk;
1078 struct l2cap_options opts;
1079 struct l2cap_conninfo cinfo;
1080 int len, err = 0;
1081
1082 BT_DBG("sk %p", sk);
1083
1084 if (get_user(len, optlen))
1085 return -EFAULT;
1086
1087 lock_sock(sk);
1088
1089 switch (optname) {
1090 case L2CAP_OPTIONS:
1091 opts.imtu = l2cap_pi(sk)->imtu;
1092 opts.omtu = l2cap_pi(sk)->omtu;
1093 opts.flush_to = l2cap_pi(sk)->flush_to;
1094 opts.mode = L2CAP_MODE_BASIC;
1095
1096 len = min_t(unsigned int, len, sizeof(opts));
1097 if (copy_to_user(optval, (char *) &opts, len))
1098 err = -EFAULT;
1099
1100 break;
1101
1102 case L2CAP_LM:
1103 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1104 err = -EFAULT;
1105 break;
1106
1107 case L2CAP_CONNINFO:
1108 if (sk->sk_state != BT_CONNECTED) {
1109 err = -ENOTCONN;
1110 break;
1111 }
1112
1113 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1114 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1115
1116 len = min_t(unsigned int, len, sizeof(cinfo));
1117 if (copy_to_user(optval, (char *) &cinfo, len))
1118 err = -EFAULT;
1119
1120 break;
1121
1122 default:
1123 err = -ENOPROTOOPT;
1124 break;
1125 }
1126
1127 release_sock(sk);
1128 return err;
1129 }
1130
/* shutdown(2) backend: always a full shutdown. Runs the close state
 * machine with reason 0 and, if SO_LINGER is set, waits up to the linger
 * time for the channel to reach BT_CLOSED. */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
1153
/* release(2)/close backend: shut the channel down, detach the struct sock
 * from its struct socket, and kill it (effective once it is zapped). */
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
1170
/* Channel configuration finished: clear config state and the setup timer,
 * then wake the waiter — the connecting owner for an outgoing channel, or
 * the accept()ing parent for an incoming one. Called on the locked socket. */
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}
1193
/* Copy frame to all raw sockets on that connection */
/* Clones @skb once per raw socket (excluding the sender) and queues the
 * clone on its receive queue; clones that cannot be queued are dropped. */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock * sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
1220
/* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb on CID 0x0001: L2CAP header + command
 * header + @dlen bytes of @data, with any overflow beyond the ACL MTU
 * chained as header-less fragments on frag_list. Returns NULL on
 * allocation failure (partially built chain is freed). */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(0x0001);	/* signalling channel */

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever payload fits after the headers goes here. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
1279
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * 1/2/4-byte options are returned by value in *val (converted from
 * little-endian); any other length yields a pointer to the raw bytes.
 * Returns the total option length consumed.
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * against the remaining buffer here — callers must bound the walk; verify
 * against the config-request parsing loop. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1312
1313 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1314 {
1315 struct l2cap_conf_opt *opt = *ptr;
1316
1317 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1318
1319 opt->type = type;
1320 opt->len = len;
1321
1322 switch (len) {
1323 case 1:
1324 *((u8 *) opt->val) = val;
1325 break;
1326
1327 case 2:
1328 *((__le16 *) opt->val) = cpu_to_le16(val);
1329 break;
1330
1331 case 4:
1332 *((__le32 *) opt->val) = cpu_to_le32(val);
1333 break;
1334
1335 default:
1336 memcpy(opt->val, (void *) val, len);
1337 break;
1338 }
1339
1340 *ptr += L2CAP_CONF_OPT_SIZE + len;
1341 }
1342
1343 static int l2cap_build_conf_req(struct sock *sk, void *data)
1344 {
1345 struct l2cap_pinfo *pi = l2cap_pi(sk);
1346 struct l2cap_conf_req *req = data;
1347 void *ptr = req->data;
1348
1349 BT_DBG("sk %p", sk);
1350
1351 if (pi->imtu != L2CAP_DEFAULT_MTU)
1352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1353
1354 /* FIXME: Need actual value of the flush timeout */
1355 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1356 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1357
1358 req->dcid = cpu_to_le16(pi->dcid);
1359 req->flags = cpu_to_le16(0);
1360
1361 return ptr - data;
1362 }
1363
/*
 * Walk the peer's accumulated Configure Request options (stored in
 * pi->conf_req by l2cap_config_req) and build our Configure Response
 * payload at 'data'.  Only basic mode is supported: an RFC option
 * requesting any other mode is answered with L2CAP_CONF_UNACCEPT plus
 * a basic-mode RFC counter-proposal.  Returns the number of bytes
 * written into 'data'.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* High bit marks a "hint": unknown hints are skipped
		 * silently instead of triggering CONF_UNKNOWN. */
		hint = type & 0x80;
		type &= 0x7f;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* Accepted but ignored. */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		default:
			if (hint)
				break;

			/* List each unknown non-hint option in the reply. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (rfc.mode == L2CAP_MODE_BASIC) {
			/* NOTE(review): any MTU smaller than the current
			 * omtu is refused outright rather than clamped —
			 * confirm this is the intended policy. */
			if (mtu < pi->omtu)
				result = L2CAP_CONF_UNACCEPT;
			else {
				pi->omtu = mtu;
				pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
			}

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
		} else {
			result = L2CAP_CONF_UNACCEPT;

			/* Counter-propose plain basic mode. */
			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = L2CAP_MODE_BASIC;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
		}
	}

	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
1442
1443 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1444 {
1445 struct l2cap_conf_rsp *rsp = data;
1446 void *ptr = rsp->data;
1447
1448 BT_DBG("sk %p", sk);
1449
1450 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1451 rsp->result = cpu_to_le16(result);
1452 rsp->flags = cpu_to_le16(flags);
1453
1454 return ptr - data;
1455 }
1456
1457 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1458 {
1459 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1460
1461 if (rej->reason != 0x0000)
1462 return 0;
1463
1464 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1465 cmd->ident == conn->info_ident) {
1466 conn->info_ident = 0;
1467 del_timer(&conn->info_timer);
1468 l2cap_conn_start(conn);
1469 }
1470
1471 return 0;
1472 }
1473
/*
 * Handle an incoming Connection Request.  Look up a listening socket
 * for the requested PSM, allocate and initialize a child socket, link
 * it into the connection's channel list and answer with a Connection
 * Response.  When the link mode requires authentication/encryption the
 * channel stays in BT_CONNECT2 and a "pending" response is sent; the
 * HCI security callbacks finish the job later.
 *
 * Locking: l2cap_get_sock_by_psm() returns with the parent socket
 * bh-locked; it is released at the 'response' label.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result = 0, status = 0;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	/* Connection reference is dropped when the channel is deleted. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;	/* peer's source cid is our dest */

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;	/* our cid, reported back to peer */

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Service level security */
	result = L2CAP_CR_PEND;
	status = L2CAP_CS_AUTHEN_PEND;
	sk->sk_state = BT_CONNECT2;
	l2cap_pi(sk)->ident = cmd->ident;

	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
		/* Security procedure started: reply "pending" for now. */
		if (!hci_conn_encrypt(conn->hcon))
			goto done;
	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
		if (!hci_conn_auth(conn->hcon))
			goto done;
	}

	/* No security required (or already satisfied): accept now. */
	sk->sk_state = BT_CONFIG;
	result = status = 0;

done:
	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	return 0;
}
1561
/*
 * Handle a Connection Response to our earlier request.  A nonzero scid
 * identifies the channel directly; scid 0 (e.g. a "pending" or error
 * reply without a cid) falls back to matching the command ident.  On
 * success the channel moves to BT_CONFIG and we immediately send our
 * Configure Request; any result other than success/pending tears the
 * channel down.
 *
 * Locking: the channel lookup helpers return with the socket
 * bh-locked; released before returning.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
			return 0;
	} else {
		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		/* Kick off configuration right away. */
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
		break;

	case L2CAP_CR_PEND:
		/* Peer needs more time (e.g. security); wait for final rsp. */
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
1606
/*
 * Handle a Configure Request.  Option data may span several requests
 * (continuation flag set); fragments are accumulated in conf_req and
 * only parsed once the final fragment arrives.  Returning -ENOENT for
 * an unknown cid makes the signalling loop send a Command Reject.
 *
 * Locking: l2cap_get_chan_by_scid() returns with the socket bh-locked;
 * released at 'unlock'.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0)
		goto unlock;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* Our side of the configuration isn't settled yet. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: channel is up. */
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We haven't asked for our direction yet; do it now. */
		u8 req[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
1676
/*
 * Handle a Configure Response to our request.  On "unacceptable
 * parameters" we simply resend the same request a bounded number of
 * times (see comment below); once retries are exhausted — or on any
 * other failure result — the channel is moved to BT_DISCONN and a
 * Disconnection Request is sent.
 *
 * Locking: l2cap_get_chan_by_scid() returns with the socket bh-locked;
 * released at 'done'.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			char req[128];
			/* It does not make sense to adjust L2CAP parameters
			 * that are currently defined in the spec. We simply
			 * resend config request that we sent earlier. It is
			 * stupid, but it helps qualification testing which
			 * expects at least some response from us. */
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(sk, req), req);
			goto done;
		}
		/* fall through — retries exhausted, give up on the channel */

	default:
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		{
			struct l2cap_disconn_req req;
			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_cmd(conn, l2cap_get_ident(conn),
					L2CAP_DISCONN_REQ, sizeof(req), &req);
		}
		goto done;
	}

	/* More response fragments to come; wait for the final one. */
	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		/* Both directions configured: channel is up. */
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
1737
/*
 * Handle a Disconnection Request from the peer: acknowledge with a
 * Disconnection Response, shut the socket down and remove the channel.
 * The socket is killed only after its lock is dropped, since
 * l2cap_sock_kill() may free it.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid: that's the channel to look up. */
	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
1765
1766 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1767 {
1768 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1769 u16 dcid, scid;
1770 struct sock *sk;
1771
1772 scid = __le16_to_cpu(rsp->scid);
1773 dcid = __le16_to_cpu(rsp->dcid);
1774
1775 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1776
1777 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1778 return 0;
1779
1780 l2cap_chan_del(sk, 0);
1781 bh_unlock_sock(sk);
1782
1783 l2cap_sock_kill(sk);
1784 return 0;
1785 }
1786
1787 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1788 {
1789 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1790 u16 type;
1791
1792 type = __le16_to_cpu(req->type);
1793
1794 BT_DBG("type 0x%4.4x", type);
1795
1796 if (type == L2CAP_IT_FEAT_MASK) {
1797 u8 buf[8];
1798 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1799 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1800 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1801 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1802 l2cap_send_cmd(conn, cmd->ident,
1803 L2CAP_INFO_RSP, sizeof(buf), buf);
1804 } else {
1805 struct l2cap_info_rsp rsp;
1806 rsp.type = cpu_to_le16(type);
1807 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1808 l2cap_send_cmd(conn, cmd->ident,
1809 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1810 }
1811
1812 return 0;
1813 }
1814
/*
 * Handle an Information Response: record the peer's feature mask,
 * cancel the timeout guarding the request, and start any channels
 * that were waiting for feature discovery to finish.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	conn->info_ident = 0;

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK)
		conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));

	l2cap_conn_start(conn);

	return 0;
}
1836
/*
 * Process one skb received on the signalling channel (cid 0x0001).
 * A single skb may contain several commands back to back; each is
 * dispatched by opcode.  Any handler error is answered with a Command
 * Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw (SOCK_RAW) sockets a copy of the signalling traffic. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo back the payload unchanged. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next command in the same skb. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
1926
/*
 * Deliver a data frame to the connection-oriented channel 'cid'.
 * The skb is either queued on the socket's receive queue (ownership
 * transferred) or freed at 'drop'.
 *
 * Locking: l2cap_get_chan_by_scid() returns with the socket bh-locked
 * (or NULL); unlocked at 'done' when one was found.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	/* Frame bigger than our negotiated incoming MTU: drop it. */
	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* If socket recv buffers overflows we drop data here
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
1962
1963 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1964 {
1965 struct sock *sk;
1966
1967 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1968 if (!sk)
1969 goto drop;
1970
1971 BT_DBG("sk %p, len %d", sk, skb->len);
1972
1973 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1974 goto drop;
1975
1976 if (l2cap_pi(sk)->imtu < skb->len)
1977 goto drop;
1978
1979 if (!sock_queue_rcv_skb(sk, skb))
1980 goto done;
1981
1982 drop:
1983 kfree_skb(skb);
1984
1985 done:
1986 if (sk) bh_unlock_sock(sk);
1987 return 0;
1988 }
1989
/*
 * Demultiplex one complete L2CAP frame by channel id: 0x0001 is the
 * signalling channel, 0x0002 is the connectionless channel (a 16-bit
 * PSM precedes the payload), everything else is a connection-oriented
 * data channel.  Each helper consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* skb_pull only advances skb->data; lh still points at the
	 * header bytes, which remain valid in the buffer. */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case 0x0001:
		l2cap_sig_channel(conn, skb);
		break;

	case 0x0002:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
2018
2019 /* ---- L2CAP interface with lower layer (HCI) ---- */
2020
2021 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2022 {
2023 int exact = 0, lm1 = 0, lm2 = 0;
2024 register struct sock *sk;
2025 struct hlist_node *node;
2026
2027 if (type != ACL_LINK)
2028 return 0;
2029
2030 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2031
2032 /* Find listening sockets and check their link_mode */
2033 read_lock(&l2cap_sk_list.lock);
2034 sk_for_each(sk, node, &l2cap_sk_list.head) {
2035 if (sk->sk_state != BT_LISTEN)
2036 continue;
2037
2038 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2039 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2040 exact++;
2041 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2042 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2043 }
2044 read_unlock(&l2cap_sk_list.lock);
2045
2046 return exact ? lm1 : lm2;
2047 }
2048
2049 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2050 {
2051 struct l2cap_conn *conn;
2052
2053 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2054
2055 if (hcon->type != ACL_LINK)
2056 return 0;
2057
2058 if (!status) {
2059 conn = l2cap_conn_add(hcon, status);
2060 if (conn)
2061 l2cap_conn_ready(conn);
2062 } else
2063 l2cap_conn_del(hcon, bt_err(status));
2064
2065 return 0;
2066 }
2067
2068 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2069 {
2070 BT_DBG("hcon %p reason %d", hcon, reason);
2071
2072 if (hcon->type != ACL_LINK)
2073 return 0;
2074
2075 l2cap_conn_del(hcon, bt_err(reason));
2076
2077 return 0;
2078 }
2079
2080 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2081 {
2082 struct l2cap_chan_list *l;
2083 struct l2cap_conn *conn = conn = hcon->l2cap_data;
2084 struct l2cap_conn_rsp rsp;
2085 struct sock *sk;
2086 int result;
2087
2088 if (!conn)
2089 return 0;
2090
2091 l = &conn->chan_list;
2092
2093 BT_DBG("conn %p", conn);
2094
2095 read_lock(&l->lock);
2096
2097 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2098 bh_lock_sock(sk);
2099
2100 if (sk->sk_state != BT_CONNECT2 ||
2101 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2102 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
2103 bh_unlock_sock(sk);
2104 continue;
2105 }
2106
2107 if (!status) {
2108 sk->sk_state = BT_CONFIG;
2109 result = 0;
2110 } else {
2111 sk->sk_state = BT_DISCONN;
2112 l2cap_sock_set_timer(sk, HZ/10);
2113 result = L2CAP_CR_SEC_BLOCK;
2114 }
2115
2116 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2117 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2118 rsp.result = cpu_to_le16(result);
2119 rsp.status = cpu_to_le16(0);
2120 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2121 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2122
2123 bh_unlock_sock(sk);
2124 }
2125
2126 read_unlock(&l->lock);
2127 return 0;
2128 }
2129
/*
 * HCI callback: encryption change finished for the link.  Walk all
 * channels parked in BT_CONNECT2 and send the final Connection
 * Response: accept on success, L2CAP_CR_SEC_BLOCK on failure.
 * Channels in secure mode additionally request a link key change.
 */
static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk;
	int result;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only channels waiting on security are of interest. */
		if (sk->sk_state != BT_CONNECT2) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status) {
			sk->sk_state = BT_CONFIG;
			result = 0;
		} else {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ/10);
			result = L2CAP_CR_SEC_BLOCK;
		}

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(result);
		rsp.status = cpu_to_le16(0);
		l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
			hci_conn_change_link_key(hcon);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
	return 0;
}
2180
/*
 * HCI callback: one ACL data fragment arrived.  Reassembles L2CAP
 * frames that span multiple ACL packets using conn->rx_skb/rx_len.
 * A START fragment that already contains the whole frame is handed to
 * l2cap_recv_frame() directly (ownership transferred — note the early
 * return without freeing); in all other paths the incoming fragment
 * is copied into the reassembly buffer and then freed at 'drop'.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A START while reassembly is in progress means we lost
		 * a fragment: throw the partial frame away. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Need at least the 16-bit length field of the header. */
		if (skb->len < 2) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending frame: protocol error. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
2267
/*
 * sysfs 'l2cap' class attribute: dump one line per L2CAP socket
 * (addresses, state, psm, cids, MTUs, link mode).
 *
 * NOTE(review): sprintf into the sysfs page buffer is unbounded; with
 * enough sockets the output could exceed PAGE_SIZE — consider
 * snprintf with remaining-space accounting.
 */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
{
	struct sock *sk;
	struct hlist_node *node;
	char *str = buf;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
				pi->imtu, pi->omtu, pi->link_mode);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return (str - buf);
}
2289
/* Read-only sysfs attribute exposing the socket list (see above). */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);

/* Socket-layer operations for BTPROTO_L2CAP sockets. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= bt_sock_recvmsg,
	.poll		= bt_sock_poll,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.ioctl		= sock_no_ioctl,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* Registered with the Bluetooth core to create L2CAP sockets. */
static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};

/* Registered with the HCI core; entry points from the lower layer. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.auth_cfm	= l2cap_auth_cfm,
	.encrypt_cfm	= l2cap_encrypt_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
2328
/*
 * Module init: register the proto, then the socket family, then the
 * HCI protocol; unwind in reverse order on failure.  Failure to
 * create the sysfs info file is logged but not fatal.
 */
static int __init l2cap_init(void)
{
	int err;

	err = proto_register(&l2cap_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	if (err < 0) {
		BT_ERR("L2CAP socket registration failed");
		goto error;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	/* Best effort: the info file is diagnostic only. */
	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	return 0;

error:
	proto_unregister(&l2cap_proto);
	return err;
}
2362
/* Module exit: undo l2cap_init() registrations in reverse order. */
static void __exit l2cap_exit(void)
{
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
2375
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
2384
/* Standard module plumbing and metadata. */
module_init(l2cap_init);
module_exit(l2cap_exit);

MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");