net/bluetooth/af_bluetooth.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth address family and sockets. */
26
27 #include <linux/module.h>
28 #include <linux/debugfs.h>
29 #include <linux/stringify.h>
30 #include <linux/sched/signal.h>
31
32 #include <asm/ioctls.h>
33
34 #include <net/bluetooth/bluetooth.h>
35 #include <linux/proc_fs.h>
36
37 #include "leds.h"
38 #include "selftest.h"
39
40 /* Bluetooth sockets */
41 #define BT_MAX_PROTO 8
42 static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
43 static DEFINE_RWLOCK(bt_proto_lock);
44
45 static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
46 static const char *const bt_key_strings[BT_MAX_PROTO] = {
47 "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
48 "sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
49 "sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
50 "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
51 "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
52 "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
53 "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
54 "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
55 };
56
57 static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
58 static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
59 "slock-AF_BLUETOOTH-BTPROTO_L2CAP",
60 "slock-AF_BLUETOOTH-BTPROTO_HCI",
61 "slock-AF_BLUETOOTH-BTPROTO_SCO",
62 "slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
63 "slock-AF_BLUETOOTH-BTPROTO_BNEP",
64 "slock-AF_BLUETOOTH-BTPROTO_CMTP",
65 "slock-AF_BLUETOOTH-BTPROTO_HIDP",
66 "slock-AF_BLUETOOTH-BTPROTO_AVDTP",
67 };
68
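/* Reassign the lockdep class and name of an already initialised socket lock
 * to the per-protocol keys above, so lockdep can tell apart nested locking
 * of different Bluetooth socket types.
 */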
69 void bt_sock_reclassify_lock(struct sock *sk, int proto)
70 {
71 BUG_ON(!sk);
72 BUG_ON(!sock_allow_reclassification(sk));
73
74 sock_lock_init_class_and_name(sk,
75 bt_slock_key_strings[proto], &bt_slock_key[proto],
76 bt_key_strings[proto], &bt_lock_key[proto]);
77 }
78 EXPORT_SYMBOL(bt_sock_reclassify_lock);
79
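/* Register a protocol's create handler in the bt_proto table. Returns
 * -EINVAL for an out-of-range protocol number and -EEXIST if the slot is
 * already taken.
 */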
80 int bt_sock_register(int proto, const struct net_proto_family *ops)
81 {
82 int err = 0;
83
84 if (proto < 0 || proto >= BT_MAX_PROTO)
85 return -EINVAL;
86
87 write_lock(&bt_proto_lock);
88
89 if (bt_proto[proto])
90 err = -EEXIST;
91 else
92 bt_proto[proto] = ops;
93
94 write_unlock(&bt_proto_lock);
95
96 return err;
97 }
98 EXPORT_SYMBOL(bt_sock_register);
99
100 void bt_sock_unregister(int proto)
101 {
102 if (proto < 0 || proto >= BT_MAX_PROTO)
103 return;
104
105 write_lock(&bt_proto_lock);
106 bt_proto[proto] = NULL;
107 write_unlock(&bt_proto_lock);
108 }
109 EXPORT_SYMBOL(bt_sock_unregister);
110
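/* PF_BLUETOOTH socket(2) entry point. Only the initial network namespace is
 * supported. If the protocol is not registered yet, try to autoload it via
 * the "bt-proto-%d" module alias, then dispatch to the protocol's own
 * create() under the read lock and reclassify the new socket's lock keys.
 */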
111 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
112 int kern)
113 {
114 int err;
115
116 if (net != &init_net)
117 return -EAFNOSUPPORT;
118
119 if (proto < 0 || proto >= BT_MAX_PROTO)
120 return -EINVAL;
121
122 if (!bt_proto[proto])
123 request_module("bt-proto-%d", proto);
124
125 err = -EPROTONOSUPPORT;
126
127 read_lock(&bt_proto_lock);
128
129 if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
130 err = bt_proto[proto]->create(net, sock, proto, kern);
131 if (!err)
132 bt_sock_reclassify_lock(sock->sk, proto);
133 module_put(bt_proto[proto]->owner);
134 }
135
136 read_unlock(&bt_proto_lock);
137
138 return err;
139 }
140
141 void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
142 {
143 write_lock(&l->lock);
144 sk_add_node(sk, &l->head);
145 write_unlock(&l->lock);
146 }
147 EXPORT_SYMBOL(bt_sock_link);
148
149 void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
150 {
151 write_lock(&l->lock);
152 sk_del_node_init(sk);
153 write_unlock(&l->lock);
154 }
155 EXPORT_SYMBOL(bt_sock_unlink);
156
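/* Queue a child socket on its parent's accept queue. A reference is taken on
 * the child and the parent's ack backlog is bumped; both are dropped again
 * in bt_accept_unlink().
 */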
157 void bt_accept_enqueue(struct sock *parent, struct sock *sk)
158 {
159 BT_DBG("parent %p, sk %p", parent, sk);
160
161 sock_hold(sk);
162 lock_sock(sk);
163 list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
164 bt_sk(sk)->parent = parent;
165 release_sock(sk);
166 parent->sk_ack_backlog++;
167 }
168 EXPORT_SYMBOL(bt_accept_enqueue);
169
170 /* Calling function must hold the sk lock.
171 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
172 */
173 void bt_accept_unlink(struct sock *sk)
174 {
175 BT_DBG("sk %p state %d", sk, sk->sk_state);
176
177 list_del_init(&bt_sk(sk)->accept_q);
178 bt_sk(sk)->parent->sk_ack_backlog--;
179 bt_sk(sk)->parent = NULL;
180 sock_put(sk);
181 }
182 EXPORT_SYMBOL(bt_accept_unlink);
183
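/* Pick a child socket that is ready to be accepted. Closed children are
 * unlinked and skipped, and the walk restarts if a child was unlinked
 * concurrently. A connected child (or any child when the parent has
 * BT_SK_DEFER_SETUP set, or when no newsock is passed) is unlinked, grafted
 * onto newsock if one was provided, and returned.
 */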
184 struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
185 {
186 struct bt_sock *s, *n;
187 struct sock *sk;
188
189 BT_DBG("parent %p", parent);
190
191 restart:
192 list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
193 sk = (struct sock *)s;
194
195 /* Prevent early freeing of sk due to unlink and sock_kill */
196 sock_hold(sk);
197 lock_sock(sk);
198
199 /* Check sk has not already been unlinked via
200 * bt_accept_unlink() due to serialisation caused by sk locking
201 */
202 if (!bt_sk(sk)->parent) {
203 BT_DBG("sk %p, already unlinked", sk);
204 release_sock(sk);
205 sock_put(sk);
206
207 /* Restart the loop as sk is no longer in the list
208 * and also avoid a potential infinite loop because
209 * list_for_each_entry_safe() is not thread safe.
210 */
211 goto restart;
212 }
213
214 /* sk is safely in the parent list so reduce reference count */
215 sock_put(sk);
216
217 /* FIXME: Is this check still needed */
218 if (sk->sk_state == BT_CLOSED) {
219 bt_accept_unlink(sk);
220 release_sock(sk);
221 continue;
222 }
223
224 if (sk->sk_state == BT_CONNECTED || !newsock ||
225 test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
226 bt_accept_unlink(sk);
227 if (newsock)
228 sock_graft(sk, newsock);
229
230 release_sock(sk);
231 return sk;
232 }
233
234 release_sock(sk);
235 }
236
237 return NULL;
238 }
239 EXPORT_SYMBOL(bt_accept_dequeue);
240
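/* Generic datagram recvmsg used by several Bluetooth protocols. Receives one
 * skb, copies at most len bytes (setting MSG_TRUNC if the datagram was
 * larger) and lets the protocol fill in the source address through the
 * optional skb_msg_name callback.
 */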
241 int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
242 int flags)
243 {
244 int noblock = flags & MSG_DONTWAIT;
245 struct sock *sk = sock->sk;
246 struct sk_buff *skb;
247 size_t copied;
248 size_t skblen;
249 int err;
250
251 BT_DBG("sock %p sk %p len %zu", sock, sk, len);
252
253 if (flags & MSG_OOB)
254 return -EOPNOTSUPP;
255
256 skb = skb_recv_datagram(sk, flags, noblock, &err);
257 if (!skb) {
258 if (sk->sk_shutdown & RCV_SHUTDOWN)
259 return 0;
260
261 return err;
262 }
263
264 skblen = skb->len;
265 copied = skb->len;
266 if (len < copied) {
267 msg->msg_flags |= MSG_TRUNC;
268 copied = len;
269 }
270
271 skb_reset_transport_header(skb);
272 err = skb_copy_datagram_msg(skb, 0, msg, copied);
273 if (err == 0) {
274 sock_recv_ts_and_drops(msg, sk, skb);
275
276 if (msg->msg_name && bt_sk(sk)->skb_msg_name)
277 bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
278 &msg->msg_namelen);
279 }
280
281 skb_free_datagram(sk, skb);
282
283 if (flags & MSG_TRUNC)
284 copied = skblen;
285
286 return err ? : copied;
287 }
288 EXPORT_SYMBOL(bt_sock_recvmsg);
289
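/* Sleep until data arrives on the receive queue, the socket errors out or is
 * shut down, a signal is pending, or the timeout expires. The socket lock is
 * dropped while sleeping and re-taken before returning.
 */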
290 static long bt_sock_data_wait(struct sock *sk, long timeo)
291 {
292 DECLARE_WAITQUEUE(wait, current);
293
294 add_wait_queue(sk_sleep(sk), &wait);
295 for (;;) {
296 set_current_state(TASK_INTERRUPTIBLE);
297
298 if (!skb_queue_empty(&sk->sk_receive_queue))
299 break;
300
301 if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
302 break;
303
304 if (signal_pending(current) || !timeo)
305 break;
306
307 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
308 release_sock(sk);
309 timeo = schedule_timeout(timeo);
310 lock_sock(sk);
311 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
312 }
313
314 __set_current_state(TASK_RUNNING);
315 remove_wait_queue(sk_sleep(sk), &wait);
316 return timeo;
317 }
318
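/* Stream-style recvmsg: copy data out of queued skbs until the request is
 * satisfied or the low-water mark is reached. Unless MSG_PEEK is set, the
 * copied bytes are pulled from the skb (linear head first, then the frag
 * list) and a partially consumed skb is put back at the head of the queue.
 */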
319 int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
320 size_t size, int flags)
321 {
322 struct sock *sk = sock->sk;
323 int err = 0;
324 size_t target, copied = 0;
325 long timeo;
326
327 if (flags & MSG_OOB)
328 return -EOPNOTSUPP;
329
330 BT_DBG("sk %p size %zu", sk, size);
331
332 lock_sock(sk);
333
334 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
335 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
336
337 do {
338 struct sk_buff *skb;
339 int chunk;
340
341 skb = skb_dequeue(&sk->sk_receive_queue);
342 if (!skb) {
343 if (copied >= target)
344 break;
345
346 err = sock_error(sk);
347 if (err)
348 break;
349 if (sk->sk_shutdown & RCV_SHUTDOWN)
350 break;
351
352 err = -EAGAIN;
353 if (!timeo)
354 break;
355
356 timeo = bt_sock_data_wait(sk, timeo);
357
358 if (signal_pending(current)) {
359 err = sock_intr_errno(timeo);
360 goto out;
361 }
362 continue;
363 }
364
365 chunk = min_t(unsigned int, skb->len, size);
366 if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
367 skb_queue_head(&sk->sk_receive_queue, skb);
368 if (!copied)
369 copied = -EFAULT;
370 break;
371 }
372 copied += chunk;
373 size -= chunk;
374
375 sock_recv_ts_and_drops(msg, sk, skb);
376
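		/* Not peeking: consume the bytes just copied from the skb. */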
377 if (!(flags & MSG_PEEK)) {
378 int skb_len = skb_headlen(skb);
379
380 if (chunk <= skb_len) {
381 __skb_pull(skb, chunk);
382 } else {
383 struct sk_buff *frag;
384
385 __skb_pull(skb, skb_len);
386 chunk -= skb_len;
387
388 skb_walk_frags(skb, frag) {
389 if (chunk <= frag->len) {
390 /* Pulling partial data */
391 skb->len -= chunk;
392 skb->data_len -= chunk;
393 __skb_pull(frag, chunk);
394 break;
395 } else if (frag->len) {
396 /* Pulling all frag data */
397 chunk -= frag->len;
398 skb->len -= frag->len;
399 skb->data_len -= frag->len;
400 __skb_pull(frag, frag->len);
401 }
402 }
403 }
404
405 if (skb->len) {
406 skb_queue_head(&sk->sk_receive_queue, skb);
407 break;
408 }
409 kfree_skb(skb);
410
411 } else {
412 /* put message back and return */
413 skb_queue_head(&sk->sk_receive_queue, skb);
414 break;
415 }
416 } while (size);
417
418 out:
419 release_sock(sk);
420 return copied ? : err;
421 }
422 EXPORT_SYMBOL(bt_sock_stream_recvmsg);
423
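/* Poll helper for listening sockets: report readable as soon as one child on
 * the accept queue is connected (or, with deferred setup, is awaiting
 * confirmation in BT_CONNECT2).
 */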
424 static inline unsigned int bt_accept_poll(struct sock *parent)
425 {
426 struct bt_sock *s, *n;
427 struct sock *sk;
428
429 list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
430 sk = (struct sock *)s;
431 if (sk->sk_state == BT_CONNECTED ||
432 (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
433 sk->sk_state == BT_CONNECT2))
434 return POLLIN | POLLRDNORM;
435 }
436
437 return 0;
438 }
439
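/* Common poll implementation: listening sockets defer to bt_accept_poll(),
 * otherwise the usual error/hangup/readable bits are derived from the socket
 * state, and writability is suppressed while BT_SK_SUSPEND is set.
 */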
440 unsigned int bt_sock_poll(struct file *file, struct socket *sock,
441 poll_table *wait)
442 {
443 struct sock *sk = sock->sk;
444 unsigned int mask = 0;
445
446 BT_DBG("sock %p, sk %p", sock, sk);
447
448 poll_wait(file, sk_sleep(sk), wait);
449
450 if (sk->sk_state == BT_LISTEN)
451 return bt_accept_poll(sk);
452
453 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
454 mask |= POLLERR |
455 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
456
457 if (sk->sk_shutdown & RCV_SHUTDOWN)
458 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
459
460 if (sk->sk_shutdown == SHUTDOWN_MASK)
461 mask |= POLLHUP;
462
463 if (!skb_queue_empty(&sk->sk_receive_queue))
464 mask |= POLLIN | POLLRDNORM;
465
466 if (sk->sk_state == BT_CLOSED)
467 mask |= POLLHUP;
468
469 if (sk->sk_state == BT_CONNECT ||
470 sk->sk_state == BT_CONNECT2 ||
471 sk->sk_state == BT_CONFIG)
472 return mask;
473
474 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
475 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
476 else
477 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
478
479 return mask;
480 }
481 EXPORT_SYMBOL(bt_sock_poll);
482
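/* Common ioctls: TIOCOUTQ reports free send-buffer space, TIOCINQ the length
 * of the next queued datagram, and SIOCGSTAMP/SIOCGSTAMPNS the receive
 * timestamp of the last packet.
 */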
483 int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
484 {
485 struct sock *sk = sock->sk;
486 struct sk_buff *skb;
487 long amount;
488 int err;
489
490 BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
491
492 switch (cmd) {
493 case TIOCOUTQ:
494 if (sk->sk_state == BT_LISTEN)
495 return -EINVAL;
496
497 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
498 if (amount < 0)
499 amount = 0;
500 err = put_user(amount, (int __user *) arg);
501 break;
502
503 case TIOCINQ:
504 if (sk->sk_state == BT_LISTEN)
505 return -EINVAL;
506
507 lock_sock(sk);
508 skb = skb_peek(&sk->sk_receive_queue);
509 amount = skb ? skb->len : 0;
510 release_sock(sk);
511 err = put_user(amount, (int __user *) arg);
512 break;
513
514 case SIOCGSTAMP:
515 err = sock_get_timestamp(sk, (struct timeval __user *) arg);
516 break;
517
518 case SIOCGSTAMPNS:
519 err = sock_get_timestampns(sk, (struct timespec __user *) arg);
520 break;
521
522 default:
523 err = -ENOIOCTLCMD;
524 break;
525 }
526
527 return err;
528 }
529 EXPORT_SYMBOL(bt_sock_ioctl);
530
531 /* This function expects the sk lock to be held when called */
532 int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
533 {
534 DECLARE_WAITQUEUE(wait, current);
535 int err = 0;
536
537 BT_DBG("sk %p", sk);
538
539 add_wait_queue(sk_sleep(sk), &wait);
540 set_current_state(TASK_INTERRUPTIBLE);
541 while (sk->sk_state != state) {
542 if (!timeo) {
543 err = -EINPROGRESS;
544 break;
545 }
546
547 if (signal_pending(current)) {
548 err = sock_intr_errno(timeo);
549 break;
550 }
551
552 release_sock(sk);
553 timeo = schedule_timeout(timeo);
554 lock_sock(sk);
555 set_current_state(TASK_INTERRUPTIBLE);
556
557 err = sock_error(sk);
558 if (err)
559 break;
560 }
561 __set_current_state(TASK_RUNNING);
562 remove_wait_queue(sk_sleep(sk), &wait);
563 return err;
564 }
565 EXPORT_SYMBOL(bt_sock_wait_state);
566
567 /* This function expects the sk lock to be held when called */
568 int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
569 {
570 DECLARE_WAITQUEUE(wait, current);
571 unsigned long timeo;
572 int err = 0;
573
574 BT_DBG("sk %p", sk);
575
576 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
577
578 add_wait_queue(sk_sleep(sk), &wait);
579 set_current_state(TASK_INTERRUPTIBLE);
580 while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
581 if (!timeo) {
582 err = -EAGAIN;
583 break;
584 }
585
586 if (signal_pending(current)) {
587 err = sock_intr_errno(timeo);
588 break;
589 }
590
591 release_sock(sk);
592 timeo = schedule_timeout(timeo);
593 lock_sock(sk);
594 set_current_state(TASK_INTERRUPTIBLE);
595
596 err = sock_error(sk);
597 if (err)
598 break;
599 }
600 __set_current_state(TASK_RUNNING);
601 remove_wait_queue(sk_sleep(sk), &wait);
602
603 return err;
604 }
605 EXPORT_SYMBOL(bt_sock_wait_ready);
606
607 #ifdef CONFIG_PROC_FS
608 struct bt_seq_state {
609 struct bt_sock_list *l;
610 };
611
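/* seq_file iteration over a protocol's socket list for its /proc/net entry.
 * The list's read lock is held from ->start() to ->stop().
 */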
612 static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
613 __acquires(seq->private->l->lock)
614 {
615 struct bt_seq_state *s = seq->private;
616 struct bt_sock_list *l = s->l;
617
618 read_lock(&l->lock);
619 return seq_hlist_start_head(&l->head, *pos);
620 }
621
622 static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
623 {
624 struct bt_seq_state *s = seq->private;
625 struct bt_sock_list *l = s->l;
626
627 return seq_hlist_next(v, &l->head, pos);
628 }
629
630 static void bt_seq_stop(struct seq_file *seq, void *v)
631 __releases(seq->private->l->lock)
632 {
633 struct bt_seq_state *s = seq->private;
634 struct bt_sock_list *l = s->l;
635
636 read_unlock(&l->lock);
637 }
638
639 static int bt_seq_show(struct seq_file *seq, void *v)
640 {
641 struct bt_seq_state *s = seq->private;
642 struct bt_sock_list *l = s->l;
643
644 if (v == SEQ_START_TOKEN) {
645 seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
646
647 if (l->custom_seq_show) {
648 seq_putc(seq, ' ');
649 l->custom_seq_show(seq, v);
650 }
651
652 seq_putc(seq, '\n');
653 } else {
654 struct sock *sk = sk_entry(v);
655 struct bt_sock *bt = bt_sk(sk);
656
657 seq_printf(seq,
658 "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
659 sk,
660 refcount_read(&sk->sk_refcnt),
661 sk_rmem_alloc_get(sk),
662 sk_wmem_alloc_get(sk),
663 from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
664 sock_i_ino(sk),
665 bt->parent ? sock_i_ino(bt->parent) : 0LU);
666
667 if (l->custom_seq_show) {
668 seq_putc(seq, ' ');
669 l->custom_seq_show(seq, v);
670 }
671
672 seq_putc(seq, '\n');
673 }
674 return 0;
675 }
676
677 static const struct seq_operations bt_seq_ops = {
678 .start = bt_seq_start,
679 .next = bt_seq_next,
680 .stop = bt_seq_stop,
681 .show = bt_seq_show,
682 };
683
684 static int bt_seq_open(struct inode *inode, struct file *file)
685 {
686 struct bt_sock_list *sk_list;
687 struct bt_seq_state *s;
688
689 sk_list = PDE_DATA(inode);
690 s = __seq_open_private(file, &bt_seq_ops,
691 sizeof(struct bt_seq_state));
692 if (!s)
693 return -ENOMEM;
694
695 s->l = sk_list;
696 return 0;
697 }
698
699 static const struct file_operations bt_fops = {
700 .open = bt_seq_open,
701 .read = seq_read,
702 .llseek = seq_lseek,
703 .release = seq_release_private
704 };
705
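/* Create the protocol's /proc/net/<name> entry backed by bt_seq_ops;
 * seq_show, if given, appends protocol-specific columns to each line.
 */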
706 int bt_procfs_init(struct net *net, const char *name,
707 struct bt_sock_list *sk_list,
708 int (*seq_show)(struct seq_file *, void *))
709 {
710 sk_list->custom_seq_show = seq_show;
711
712 if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
713 return -ENOMEM;
714 return 0;
715 }
716
717 void bt_procfs_cleanup(struct net *net, const char *name)
718 {
719 remove_proc_entry(name, net->proc_net);
720 }
721 #else
722 int bt_procfs_init(struct net *net, const char *name,
723 struct bt_sock_list *sk_list,
724 int (*seq_show)(struct seq_file *, void *))
725 {
726 return 0;
727 }
728
729 void bt_procfs_cleanup(struct net *net, const char *name)
730 {
731 }
732 #endif
733 EXPORT_SYMBOL(bt_procfs_init);
734 EXPORT_SYMBOL(bt_procfs_cleanup);
735
736 static const struct net_proto_family bt_sock_family_ops = {
737 .owner = THIS_MODULE,
738 .family = PF_BLUETOOTH,
739 .create = bt_sock_create,
740 };
741
742 struct dentry *bt_debugfs;
743 EXPORT_SYMBOL_GPL(bt_debugfs);
744
745 #define VERSION __stringify(BT_SUBSYS_VERSION) "." \
746 __stringify(BT_SUBSYS_REVISION)
747
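/* Bring up the Bluetooth core at subsys_initcall time: run the built-in
 * selftests, set up debugfs and LED support, register sysfs and the
 * PF_BLUETOOTH socket family, then initialise HCI sockets, L2CAP, SCO and
 * the management interface, unwinding in reverse order on failure.
 */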
748 static int __init bt_init(void)
749 {
750 int err;
751
752 sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
753
754 BT_INFO("Core ver %s", VERSION);
755
756 err = bt_selftest();
757 if (err < 0)
758 return err;
759
760 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
761
762 bt_leds_init();
763
764 err = bt_sysfs_init();
765 if (err < 0)
766 return err;
767
768 err = sock_register(&bt_sock_family_ops);
769 if (err < 0) {
770 bt_sysfs_cleanup();
771 return err;
772 }
773
774 BT_INFO("HCI device and connection manager initialized");
775
776 err = hci_sock_init();
777 if (err < 0)
778 goto error;
779
780 err = l2cap_init();
781 if (err < 0)
782 goto sock_err;
783
784 err = sco_init();
785 if (err < 0) {
786 l2cap_exit();
787 goto sock_err;
788 }
789
790 err = mgmt_init();
791 if (err < 0) {
792 sco_exit();
793 l2cap_exit();
794 goto sock_err;
795 }
796
797 return 0;
798
799 sock_err:
800 hci_sock_cleanup();
801
802 error:
803 sock_unregister(PF_BLUETOOTH);
804 bt_sysfs_cleanup();
805
806 return err;
807 }
808
809 static void __exit bt_exit(void)
810 {
811 mgmt_exit();
812
813 sco_exit();
814
815 l2cap_exit();
816
817 hci_sock_cleanup();
818
819 sock_unregister(PF_BLUETOOTH);
820
821 bt_sysfs_cleanup();
822
823 bt_leds_cleanup();
824
825 debugfs_remove_recursive(bt_debugfs);
826 }
827
828 subsys_initcall(bt_init);
829 module_exit(bt_exit);
830
831 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
832 MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
833 MODULE_VERSION(VERSION);
834 MODULE_LICENSE("GPL");
835 MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);