1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth address family and sockets. */
26
27 #include <linux/module.h>
28 #include <linux/debugfs.h>
29 #include <linux/stringify.h>
30 #include <linux/sched/signal.h>
31
32 #include <asm/ioctls.h>
33
34 #include <net/bluetooth/bluetooth.h>
35 #include <linux/proc_fs.h>
36
37 #include "leds.h"
38 #include "selftest.h"
39
40 /* Bluetooth sockets */
41 #define BT_MAX_PROTO 8
42 static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
43 static DEFINE_RWLOCK(bt_proto_lock);
44
45 static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
46 static const char *const bt_key_strings[BT_MAX_PROTO] = {
47 "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
48 "sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
49 "sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
50 "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
51 "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
52 "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
53 "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
54 "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
55 };
56
57 static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
58 static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
59 "slock-AF_BLUETOOTH-BTPROTO_L2CAP",
60 "slock-AF_BLUETOOTH-BTPROTO_HCI",
61 "slock-AF_BLUETOOTH-BTPROTO_SCO",
62 "slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
63 "slock-AF_BLUETOOTH-BTPROTO_BNEP",
64 "slock-AF_BLUETOOTH-BTPROTO_CMTP",
65 "slock-AF_BLUETOOTH-BTPROTO_HIDP",
66 "slock-AF_BLUETOOTH-BTPROTO_AVDTP",
67 };
68
69 void bt_sock_reclassify_lock(struct sock *sk, int proto)
70 {
71 BUG_ON(!sk);
72 BUG_ON(!sock_allow_reclassification(sk));
73
74 sock_lock_init_class_and_name(sk,
75 bt_slock_key_strings[proto], &bt_slock_key[proto],
76 bt_key_strings[proto], &bt_lock_key[proto]);
77 }
78 EXPORT_SYMBOL(bt_sock_reclassify_lock);
79
80 int bt_sock_register(int proto, const struct net_proto_family *ops)
81 {
82 int err = 0;
83
84 if (proto < 0 || proto >= BT_MAX_PROTO)
85 return -EINVAL;
86
87 write_lock(&bt_proto_lock);
88
89 if (bt_proto[proto])
90 err = -EEXIST;
91 else
92 bt_proto[proto] = ops;
93
94 write_unlock(&bt_proto_lock);
95
96 return err;
97 }
98 EXPORT_SYMBOL(bt_sock_register);
99
100 void bt_sock_unregister(int proto)
101 {
102 if (proto < 0 || proto >= BT_MAX_PROTO)
103 return;
104
105 write_lock(&bt_proto_lock);
106 bt_proto[proto] = NULL;
107 write_unlock(&bt_proto_lock);
108 }
109 EXPORT_SYMBOL(bt_sock_unregister);
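/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a protocol module typically registers its socket creator with
 * bt_sock_register() from module init and removes it again on exit.  All
 * example_* names are hypothetical and BTPROTO_RFCOMM is used only as a
 * placeholder slot; the block is guarded out of the build.
 */
#if 0
static int example_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	/* allocate and initialise the protocol's struct sock here */
	return 0;
}

static const struct net_proto_family example_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= example_create,
};

static int __init example_proto_init(void)
{
	return bt_sock_register(BTPROTO_RFCOMM, &example_family_ops);
}

static void __exit example_proto_exit(void)
{
	bt_sock_unregister(BTPROTO_RFCOMM);
}
#endif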
110
111 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
112 int kern)
113 {
114 int err;
115
116 if (net != &init_net)
117 return -EAFNOSUPPORT;
118
119 if (proto < 0 || proto >= BT_MAX_PROTO)
120 return -EINVAL;
121
122 if (!bt_proto[proto])
123 request_module("bt-proto-%d", proto);
124
125 err = -EPROTONOSUPPORT;
126
127 read_lock(&bt_proto_lock);
128
129 if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
130 err = bt_proto[proto]->create(net, sock, proto, kern);
131 if (!err)
132 bt_sock_reclassify_lock(sock->sk, proto);
133 module_put(bt_proto[proto]->owner);
134 }
135
136 read_unlock(&bt_proto_lock);
137
138 return err;
139 }
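/*
 * Illustrative sketch (editor's addition): from user space the create path
 * above is reached through an ordinary socket() call; if the protocol module
 * is not loaded yet, request_module("bt-proto-%d") asks modprobe for it
 * first.  This is user-space code built against the BlueZ headers, shown
 * here only for context and excluded from the build.
 */
#if 0
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

static int open_l2cap_socket(void)
{
	/* lands in bt_sock_create() and then in L2CAP's ->create() */
	return socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
}
#endif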
140
141 void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
142 {
143 write_lock(&l->lock);
144 sk_add_node(sk, &l->head);
145 write_unlock(&l->lock);
146 }
147 EXPORT_SYMBOL(bt_sock_link);
148
149 void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
150 {
151 write_lock(&l->lock);
152 sk_del_node_init(sk);
153 write_unlock(&l->lock);
154 }
155 EXPORT_SYMBOL(bt_sock_unlink);
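/*
 * Illustrative sketch (editor's addition): each protocol keeps its own
 * bt_sock_list and links every freshly created socket into it, usually just
 * before returning from its ->create() callback; bt_sock_unlink() mirrors
 * this from the release path.  Names are hypothetical, block not built.
 */
#if 0
static struct bt_sock_list example_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(example_sk_list.lock),
};

static void example_track_socket(struct sock *sk)
{
	bt_sock_link(&example_sk_list, sk);
}
#endif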
156
157 void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
158 {
159 BT_DBG("parent %p, sk %p", parent, sk);
160
161 sock_hold(sk);
162
163 if (bh)
164 bh_lock_sock_nested(sk);
165 else
166 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
167
168 list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
169 bt_sk(sk)->parent = parent;
170
171 if (bh)
172 bh_unlock_sock(sk);
173 else
174 release_sock(sk);
175
176 parent->sk_ack_backlog++;
177 }
178 EXPORT_SYMBOL(bt_accept_enqueue);
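/*
 * Illustrative sketch (editor's addition): callers of bt_accept_enqueue()
 * pick the bh flag according to the context they run in.  Process-context
 * callers can sleep and pass false (lock_sock variant); softirq callers must
 * not sleep and pass true so only bh_lock_sock_nested() is taken.  The
 * helper below is hypothetical and not built.
 */
#if 0
static void example_new_connection(struct sock *parent, struct sock *child,
				   bool from_softirq)
{
	bt_accept_enqueue(parent, child, from_softirq);
}
#endif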
179
180 /* Calling function must hold the sk lock.
181 * bt_sk(sk)->parent must be non-NULL, meaning sk is in the parent list.
182 */
183 void bt_accept_unlink(struct sock *sk)
184 {
185 BT_DBG("sk %p state %d", sk, sk->sk_state);
186
187 list_del_init(&bt_sk(sk)->accept_q);
188 bt_sk(sk)->parent->sk_ack_backlog--;
189 bt_sk(sk)->parent = NULL;
190 sock_put(sk);
191 }
192 EXPORT_SYMBOL(bt_accept_unlink);
193
194 struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
195 {
196 struct bt_sock *s, *n;
197 struct sock *sk;
198
199 BT_DBG("parent %p", parent);
200
201 restart:
202 list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
203 sk = (struct sock *)s;
204
205 /* Prevent early freeing of sk due to unlink and sock_kill */
206 sock_hold(sk);
207 lock_sock(sk);
208
209 /* Check sk has not already been unlinked via
210 * bt_accept_unlink() due to serialisation caused by sk locking
211 */
212 if (!bt_sk(sk)->parent) {
213 BT_DBG("sk %p, already unlinked", sk);
214 release_sock(sk);
215 sock_put(sk);
216
217 /* Restart the loop as sk is no longer in the list
218 * and also avoid a potential infinite loop because
219 * list_for_each_entry_safe() is not thread safe.
220 */
221 goto restart;
222 }
223
224 /* sk is safely in the parent list so reduce reference count */
225 sock_put(sk);
226
227 /* FIXME: Is this check still needed? */
228 if (sk->sk_state == BT_CLOSED) {
229 bt_accept_unlink(sk);
230 release_sock(sk);
231 continue;
232 }
233
234 if (sk->sk_state == BT_CONNECTED || !newsock ||
235 test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
236 bt_accept_unlink(sk);
237 if (newsock)
238 sock_graft(sk, newsock);
239
240 release_sock(sk);
241 return sk;
242 }
243
244 release_sock(sk);
245 }
246
247 return NULL;
248 }
249 EXPORT_SYMBOL(bt_accept_dequeue);
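/*
 * Illustrative sketch (editor's addition): a protocol's accept() callback
 * drains the listening socket with bt_accept_dequeue() and grafts the first
 * connected child onto the new user-space socket.  The blocking wait on
 * sk_sleep() that real implementations perform is omitted.  Names are
 * hypothetical and the block is not built.
 */
#if 0
static int example_sock_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct sock *parent = sock->sk;
	struct sock *child;
	int err = 0;

	lock_sock(parent);

	if (parent->sk_state != BT_LISTEN) {
		err = -EBADFD;
	} else {
		child = bt_accept_dequeue(parent, newsock);
		if (child)
			newsock->state = SS_CONNECTED;
		else
			err = -EAGAIN;	/* real code would block and retry */
	}

	release_sock(parent);
	return err;
}
#endif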
250
251 int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
252 int flags)
253 {
254 int noblock = flags & MSG_DONTWAIT;
255 struct sock *sk = sock->sk;
256 struct sk_buff *skb;
257 size_t copied;
258 size_t skblen;
259 int err;
260
261 BT_DBG("sock %p sk %p len %zu", sock, sk, len);
262
263 if (flags & MSG_OOB)
264 return -EOPNOTSUPP;
265
266 skb = skb_recv_datagram(sk, flags, noblock, &err);
267 if (!skb) {
268 if (sk->sk_shutdown & RCV_SHUTDOWN)
269 return 0;
270
271 return err;
272 }
273
274 skblen = skb->len;
275 copied = skb->len;
276 if (len < copied) {
277 msg->msg_flags |= MSG_TRUNC;
278 copied = len;
279 }
280
281 skb_reset_transport_header(skb);
282 err = skb_copy_datagram_msg(skb, 0, msg, copied);
283 if (err == 0) {
284 sock_recv_ts_and_drops(msg, sk, skb);
285
286 if (msg->msg_name && bt_sk(sk)->skb_msg_name)
287 bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
288 &msg->msg_namelen);
289 }
290
291 skb_free_datagram(sk, skb);
292
293 if (flags & MSG_TRUNC)
294 copied = skblen;
295
296 return err ? : copied;
297 }
298 EXPORT_SYMBOL(bt_sock_recvmsg);
299
300 static long bt_sock_data_wait(struct sock *sk, long timeo)
301 {
302 DECLARE_WAITQUEUE(wait, current);
303
304 add_wait_queue(sk_sleep(sk), &wait);
305 for (;;) {
306 set_current_state(TASK_INTERRUPTIBLE);
307
308 if (!skb_queue_empty(&sk->sk_receive_queue))
309 break;
310
311 if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
312 break;
313
314 if (signal_pending(current) || !timeo)
315 break;
316
317 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
318 release_sock(sk);
319 timeo = schedule_timeout(timeo);
320 lock_sock(sk);
321 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
322 }
323
324 __set_current_state(TASK_RUNNING);
325 remove_wait_queue(sk_sleep(sk), &wait);
326 return timeo;
327 }
328
329 int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
330 size_t size, int flags)
331 {
332 struct sock *sk = sock->sk;
333 int err = 0;
334 size_t target, copied = 0;
335 long timeo;
336
337 if (flags & MSG_OOB)
338 return -EOPNOTSUPP;
339
340 BT_DBG("sk %p size %zu", sk, size);
341
342 lock_sock(sk);
343
344 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
345 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
346
347 do {
348 struct sk_buff *skb;
349 int chunk;
350
351 skb = skb_dequeue(&sk->sk_receive_queue);
352 if (!skb) {
353 if (copied >= target)
354 break;
355
356 err = sock_error(sk);
357 if (err)
358 break;
359 if (sk->sk_shutdown & RCV_SHUTDOWN)
360 break;
361
362 err = -EAGAIN;
363 if (!timeo)
364 break;
365
366 timeo = bt_sock_data_wait(sk, timeo);
367
368 if (signal_pending(current)) {
369 err = sock_intr_errno(timeo);
370 goto out;
371 }
372 continue;
373 }
374
375 chunk = min_t(unsigned int, skb->len, size);
376 if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
377 skb_queue_head(&sk->sk_receive_queue, skb);
378 if (!copied)
379 copied = -EFAULT;
380 break;
381 }
382 copied += chunk;
383 size -= chunk;
384
385 sock_recv_ts_and_drops(msg, sk, skb);
386
387 if (!(flags & MSG_PEEK)) {
388 int skb_len = skb_headlen(skb);
389
390 if (chunk <= skb_len) {
391 __skb_pull(skb, chunk);
392 } else {
393 struct sk_buff *frag;
394
395 __skb_pull(skb, skb_len);
396 chunk -= skb_len;
397
398 skb_walk_frags(skb, frag) {
399 if (chunk <= frag->len) {
400 /* Pulling partial data */
401 skb->len -= chunk;
402 skb->data_len -= chunk;
403 __skb_pull(frag, chunk);
404 break;
405 } else if (frag->len) {
406 /* Pulling all frag data */
407 chunk -= frag->len;
408 skb->len -= frag->len;
409 skb->data_len -= frag->len;
410 __skb_pull(frag, frag->len);
411 }
412 }
413 }
414
415 if (skb->len) {
416 skb_queue_head(&sk->sk_receive_queue, skb);
417 break;
418 }
419 kfree_skb(skb);
420
421 } else {
422 /* put message back and return */
423 skb_queue_head(&sk->sk_receive_queue, skb);
424 break;
425 }
426 } while (size);
427
428 out:
429 release_sock(sk);
430 return copied ? : err;
431 }
432 EXPORT_SYMBOL(bt_sock_stream_recvmsg);
433
434 static inline unsigned int bt_accept_poll(struct sock *parent)
435 {
436 struct bt_sock *s, *n;
437 struct sock *sk;
438
439 list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
440 sk = (struct sock *)s;
441 if (sk->sk_state == BT_CONNECTED ||
442 (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
443 sk->sk_state == BT_CONNECT2))
444 return POLLIN | POLLRDNORM;
445 }
446
447 return 0;
448 }
449
450 unsigned int bt_sock_poll(struct file *file, struct socket *sock,
451 poll_table *wait)
452 {
453 struct sock *sk = sock->sk;
454 unsigned int mask = 0;
455
456 BT_DBG("sock %p, sk %p", sock, sk);
457
458 poll_wait(file, sk_sleep(sk), wait);
459
460 if (sk->sk_state == BT_LISTEN)
461 return bt_accept_poll(sk);
462
463 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
464 mask |= POLLERR |
465 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
466
467 if (sk->sk_shutdown & RCV_SHUTDOWN)
468 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
469
470 if (sk->sk_shutdown == SHUTDOWN_MASK)
471 mask |= POLLHUP;
472
473 if (!skb_queue_empty(&sk->sk_receive_queue))
474 mask |= POLLIN | POLLRDNORM;
475
476 if (sk->sk_state == BT_CLOSED)
477 mask |= POLLHUP;
478
479 if (sk->sk_state == BT_CONNECT ||
480 sk->sk_state == BT_CONNECT2 ||
481 sk->sk_state == BT_CONFIG)
482 return mask;
483
484 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
485 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
486 else
487 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
488
489 return mask;
490 }
491 EXPORT_SYMBOL(bt_sock_poll);
492
493 int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
494 {
495 struct sock *sk = sock->sk;
496 struct sk_buff *skb;
497 long amount;
498 int err;
499
500 BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
501
502 switch (cmd) {
503 case TIOCOUTQ:
504 if (sk->sk_state == BT_LISTEN)
505 return -EINVAL;
506
507 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
508 if (amount < 0)
509 amount = 0;
510 err = put_user(amount, (int __user *) arg);
511 break;
512
513 case TIOCINQ:
514 if (sk->sk_state == BT_LISTEN)
515 return -EINVAL;
516
517 lock_sock(sk);
518 skb = skb_peek(&sk->sk_receive_queue);
519 amount = skb ? skb->len : 0;
520 release_sock(sk);
521 err = put_user(amount, (int __user *) arg);
522 break;
523
524 case SIOCGSTAMP:
525 err = sock_get_timestamp(sk, (struct timeval __user *) arg);
526 break;
527
528 case SIOCGSTAMPNS:
529 err = sock_get_timestampns(sk, (struct timespec __user *) arg);
530 break;
531
532 default:
533 err = -ENOIOCTLCMD;
534 break;
535 }
536
537 return err;
538 }
539 EXPORT_SYMBOL(bt_sock_ioctl);
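/*
 * Illustrative sketch (editor's addition): the generic helpers above are
 * meant to be plugged directly into a protocol's proto_ops table; only the
 * protocol-specific callbacks need their own implementations.  The table is
 * deliberately incomplete, its name hypothetical, and the block not built.
 */
#if 0
static const struct proto_ops example_proto_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.recvmsg	= bt_sock_stream_recvmsg,	/* or bt_sock_recvmsg */
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	/* .bind, .connect, .listen, .sendmsg, ... are protocol specific */
};
#endif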
540
541 /* This function expects the sk lock to be held when called */
542 int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
543 {
544 DECLARE_WAITQUEUE(wait, current);
545 int err = 0;
546
547 BT_DBG("sk %p", sk);
548
549 add_wait_queue(sk_sleep(sk), &wait);
550 set_current_state(TASK_INTERRUPTIBLE);
551 while (sk->sk_state != state) {
552 if (!timeo) {
553 err = -EINPROGRESS;
554 break;
555 }
556
557 if (signal_pending(current)) {
558 err = sock_intr_errno(timeo);
559 break;
560 }
561
562 release_sock(sk);
563 timeo = schedule_timeout(timeo);
564 lock_sock(sk);
565 set_current_state(TASK_INTERRUPTIBLE);
566
567 err = sock_error(sk);
568 if (err)
569 break;
570 }
571 __set_current_state(TASK_RUNNING);
572 remove_wait_queue(sk_sleep(sk), &wait);
573 return err;
574 }
575 EXPORT_SYMBOL(bt_sock_wait_state);
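/*
 * Illustrative sketch (editor's addition): a protocol's connect() path
 * typically starts link establishment and then, with the socket lock held as
 * required above, waits for the state machine to reach BT_CONNECTED.  The
 * function and its flags handling are hypothetical; the block is not built.
 */
#if 0
static int example_sock_connect(struct sock *sk, unsigned long flags)
{
	long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
	int err;

	lock_sock(sk);
	/* ... start HCI/L2CAP connection setup here ... */
	err = bt_sock_wait_state(sk, BT_CONNECTED, timeo);
	release_sock(sk);

	return err;
}
#endif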
576
577 /* This function expects the sk lock to be held when called */
578 int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
579 {
580 DECLARE_WAITQUEUE(wait, current);
581 unsigned long timeo;
582 int err = 0;
583
584 BT_DBG("sk %p", sk);
585
586 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
587
588 add_wait_queue(sk_sleep(sk), &wait);
589 set_current_state(TASK_INTERRUPTIBLE);
590 while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
591 if (!timeo) {
592 err = -EAGAIN;
593 break;
594 }
595
596 if (signal_pending(current)) {
597 err = sock_intr_errno(timeo);
598 break;
599 }
600
601 release_sock(sk);
602 timeo = schedule_timeout(timeo);
603 lock_sock(sk);
604 set_current_state(TASK_INTERRUPTIBLE);
605
606 err = sock_error(sk);
607 if (err)
608 break;
609 }
610 __set_current_state(TASK_RUNNING);
611 remove_wait_queue(sk_sleep(sk), &wait);
612
613 return err;
614 }
615 EXPORT_SYMBOL(bt_sock_wait_ready);
616
617 #ifdef CONFIG_PROC_FS
618 struct bt_seq_state {
619 struct bt_sock_list *l;
620 };
621
622 static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
623 __acquires(seq->private->l->lock)
624 {
625 struct bt_seq_state *s = seq->private;
626 struct bt_sock_list *l = s->l;
627
628 read_lock(&l->lock);
629 return seq_hlist_start_head(&l->head, *pos);
630 }
631
632 static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
633 {
634 struct bt_seq_state *s = seq->private;
635 struct bt_sock_list *l = s->l;
636
637 return seq_hlist_next(v, &l->head, pos);
638 }
639
640 static void bt_seq_stop(struct seq_file *seq, void *v)
641 __releases(seq->private->l->lock)
642 {
643 struct bt_seq_state *s = seq->private;
644 struct bt_sock_list *l = s->l;
645
646 read_unlock(&l->lock);
647 }
648
649 static int bt_seq_show(struct seq_file *seq, void *v)
650 {
651 struct bt_seq_state *s = seq->private;
652 struct bt_sock_list *l = s->l;
653
654 if (v == SEQ_START_TOKEN) {
655 seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");
656
657 if (l->custom_seq_show) {
658 seq_putc(seq, ' ');
659 l->custom_seq_show(seq, v);
660 }
661
662 seq_putc(seq, '\n');
663 } else {
664 struct sock *sk = sk_entry(v);
665 struct bt_sock *bt = bt_sk(sk);
666
667 seq_printf(seq,
668 "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
669 sk,
670 refcount_read(&sk->sk_refcnt),
671 sk_rmem_alloc_get(sk),
672 sk_wmem_alloc_get(sk),
673 from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
674 sock_i_ino(sk),
675 bt->parent ? sock_i_ino(bt->parent) : 0LU);
676
677 if (l->custom_seq_show) {
678 seq_putc(seq, ' ');
679 l->custom_seq_show(seq, v);
680 }
681
682 seq_putc(seq, '\n');
683 }
684 return 0;
685 }
686
687 static const struct seq_operations bt_seq_ops = {
688 .start = bt_seq_start,
689 .next = bt_seq_next,
690 .stop = bt_seq_stop,
691 .show = bt_seq_show,
692 };
693
694 static int bt_seq_open(struct inode *inode, struct file *file)
695 {
696 struct bt_sock_list *sk_list;
697 struct bt_seq_state *s;
698
699 sk_list = PDE_DATA(inode);
700 s = __seq_open_private(file, &bt_seq_ops,
701 sizeof(struct bt_seq_state));
702 if (!s)
703 return -ENOMEM;
704
705 s->l = sk_list;
706 return 0;
707 }
708
709 static const struct file_operations bt_fops = {
710 .open = bt_seq_open,
711 .read = seq_read,
712 .llseek = seq_lseek,
713 .release = seq_release_private
714 };
715
716 int bt_procfs_init(struct net *net, const char *name,
717 struct bt_sock_list *sk_list,
718 int (*seq_show)(struct seq_file *, void *))
719 {
720 sk_list->custom_seq_show = seq_show;
721
722 if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
723 return -ENOMEM;
724 return 0;
725 }
726
727 void bt_procfs_cleanup(struct net *net, const char *name)
728 {
729 remove_proc_entry(name, net->proc_net);
730 }
731 #else
732 int bt_procfs_init(struct net *net, const char *name,
733 struct bt_sock_list *sk_list,
734 int (*seq_show)(struct seq_file *, void *))
735 {
736 return 0;
737 }
738
739 void bt_procfs_cleanup(struct net *net, const char *name)
740 {
741 }
742 #endif
743 EXPORT_SYMBOL(bt_procfs_init);
744 EXPORT_SYMBOL(bt_procfs_cleanup);
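/*
 * Illustrative sketch (editor's addition): protocols expose their socket
 * list under /proc/net/<name> by calling bt_procfs_init() from module init
 * and bt_procfs_cleanup() from module exit.  "example" and example_sk_list
 * (sketched earlier) are hypothetical; passing NULL means no extra
 * per-socket columns.  The block is not built.
 */
#if 0
static int __init example_proto_proc_init(void)
{
	return bt_procfs_init(&init_net, "example", &example_sk_list, NULL);
}

static void __exit example_proto_proc_exit(void)
{
	bt_procfs_cleanup(&init_net, "example");
}
#endif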
745
746 static const struct net_proto_family bt_sock_family_ops = {
747 .owner = THIS_MODULE,
748 .family = PF_BLUETOOTH,
749 .create = bt_sock_create,
750 };
751
752 struct dentry *bt_debugfs;
753 EXPORT_SYMBOL_GPL(bt_debugfs);
754
755 #define VERSION __stringify(BT_SUBSYS_VERSION) "." \
756 __stringify(BT_SUBSYS_REVISION)
757
758 static int __init bt_init(void)
759 {
760 int err;
761
762 sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
763
764 BT_INFO("Core ver %s", VERSION);
765
766 err = bt_selftest();
767 if (err < 0)
768 return err;
769
770 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
771
772 bt_leds_init();
773
774 err = bt_sysfs_init();
775 if (err < 0)
776 return err;
777
778 err = sock_register(&bt_sock_family_ops);
779 if (err < 0) {
780 bt_sysfs_cleanup();
781 return err;
782 }
783
784 BT_INFO("HCI device and connection manager initialized");
785
786 err = hci_sock_init();
787 if (err < 0)
788 goto error;
789
790 err = l2cap_init();
791 if (err < 0)
792 goto sock_err;
793
794 err = sco_init();
795 if (err < 0) {
796 l2cap_exit();
797 goto sock_err;
798 }
799
800 err = mgmt_init();
801 if (err < 0) {
802 sco_exit();
803 l2cap_exit();
804 goto sock_err;
805 }
806
807 return 0;
808
809 sock_err:
810 hci_sock_cleanup();
811
812 error:
813 sock_unregister(PF_BLUETOOTH);
814 bt_sysfs_cleanup();
815
816 return err;
817 }
818
819 static void __exit bt_exit(void)
820 {
821 mgmt_exit();
822
823 sco_exit();
824
825 l2cap_exit();
826
827 hci_sock_cleanup();
828
829 sock_unregister(PF_BLUETOOTH);
830
831 bt_sysfs_cleanup();
832
833 bt_leds_cleanup();
834
835 debugfs_remove_recursive(bt_debugfs);
836 }
837
838 subsys_initcall(bt_init);
839 module_exit(bt_exit);
840
841 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
842 MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
843 MODULE_VERSION(VERSION);
844 MODULE_LICENSE("GPL");
845 MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);