net/bluetooth/hci_core.c (mirror_ubuntu-bionic-kernel.git, blob 8ca8cf147058913acea2187223fe21d653ce9f32)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <net/sock.h>
45
46 #include <asm/system.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
49
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52
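/* Delay in milliseconds before a freshly registered controller that nobody
 * has picked up is powered back off again (see hci_power_on() and
 * hci_auto_off()). */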
53 #define AUTO_OFF_TIMEOUT 2000
54
55 static void hci_cmd_task(unsigned long arg);
56 static void hci_rx_task(unsigned long arg);
57 static void hci_tx_task(unsigned long arg);
58 static void hci_notify(struct hci_dev *hdev, int event);
59
60 static DEFINE_RWLOCK(hci_task_lock);
61
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
65
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
69
70 /* HCI protocols */
71 #define HCI_MAX_PROTO 2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77 /* ---- HCI notifications ---- */
78
79 int hci_register_notifier(struct notifier_block *nb)
80 {
81 return atomic_notifier_chain_register(&hci_notifier, nb);
82 }
83
84 int hci_unregister_notifier(struct notifier_block *nb)
85 {
86 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 }
88
89 static void hci_notify(struct hci_dev *hdev, int event)
90 {
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 }
93
94 /* ---- HCI requests ---- */
95
96 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
97 {
98 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
100 /* If this is the init phase, check whether the completed command matches
101 * the last init command; if not, just return.
102 */
103 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
104 return;
105
106 if (hdev->req_status == HCI_REQ_PEND) {
107 hdev->req_result = result;
108 hdev->req_status = HCI_REQ_DONE;
109 wake_up_interruptible(&hdev->req_wait_q);
110 }
111 }
112
113 static void hci_req_cancel(struct hci_dev *hdev, int err)
114 {
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
121 }
122 }
123
124 /* Execute request and wait for completion. */
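/* The request function req() only queues the needed HCI commands; the caller
 * then sleeps on hdev->req_wait_q until hci_req_complete() or
 * hci_req_cancel() updates req_status and wakes it, or until the timeout
 * expires. */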
125 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
126 unsigned long opt, __u32 timeout)
127 {
128 DECLARE_WAITQUEUE(wait, current);
129 int err = 0;
130
131 BT_DBG("%s start", hdev->name);
132
133 hdev->req_status = HCI_REQ_PEND;
134
135 add_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_INTERRUPTIBLE);
137
138 req(hdev, opt);
139 schedule_timeout(timeout);
140
141 remove_wait_queue(&hdev->req_wait_q, &wait);
142
143 if (signal_pending(current))
144 return -EINTR;
145
146 switch (hdev->req_status) {
147 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result);
149 break;
150
151 case HCI_REQ_CANCELED:
152 err = -hdev->req_result;
153 break;
154
155 default:
156 err = -ETIMEDOUT;
157 break;
158 }
159
160 hdev->req_status = hdev->req_result = 0;
161
162 BT_DBG("%s end: err %d", hdev->name, err);
163
164 return err;
165 }
166
167 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
168 unsigned long opt, __u32 timeout)
169 {
170 int ret;
171
172 if (!test_bit(HCI_UP, &hdev->flags))
173 return -ENETDOWN;
174
175 /* Serialize all requests */
176 hci_req_lock(hdev);
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
179
180 return ret;
181 }
182
183 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
184 {
185 BT_DBG("%s %ld", hdev->name, opt);
186
187 /* Reset device */
188 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
189 }
190
191 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
192 {
193 struct hci_cp_delete_stored_link_key cp;
194 struct sk_buff *skb;
195 __le16 param;
196 __u8 flt_type;
197
198 BT_DBG("%s %ld", hdev->name, opt);
199
200 /* Driver initialization */
201
202 /* Special commands */
203 while ((skb = skb_dequeue(&hdev->driver_init))) {
204 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
205 skb->dev = (void *) hdev;
206
207 skb_queue_tail(&hdev->cmd_q, skb);
208 tasklet_schedule(&hdev->cmd_task);
209 }
210 skb_queue_purge(&hdev->driver_init);
211
212 /* Mandatory initialization */
213
214 /* Reset */
215 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
216 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
217
218 /* Read Local Supported Features */
219 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
220
221 /* Read Local Version */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
225 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
226
227 #if 0
228 /* Host buffer size */
229 {
230 struct hci_cp_host_buffer_size cp;
231 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
232 cp.sco_mtu = HCI_MAX_SCO_SIZE;
233 cp.acl_max_pkt = cpu_to_le16(0xffff);
234 cp.sco_max_pkt = cpu_to_le16(0xffff);
235 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
236 }
237 #endif
238
239 /* Read BD Address */
240 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
241
242 /* Read Class of Device */
243 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
244
245 /* Read Local Name */
246 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
247
248 /* Read Voice Setting */
249 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
250
251 /* Optional initialization */
252
253 /* Clear Event Filters */
254 flt_type = HCI_FLT_CLEAR_ALL;
255 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
256
257 /* Connection accept timeout ~20 secs */
258 param = cpu_to_le16(0x7d00);
259 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
260
261 bacpy(&cp.bdaddr, BDADDR_ANY);
262 cp.delete_all = 1;
263 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
264 }
265
266 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
267 {
268 __u8 scan = opt;
269
270 BT_DBG("%s %x", hdev->name, scan);
271
272 /* Inquiry and Page scans */
273 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
274 }
275
276 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
277 {
278 __u8 auth = opt;
279
280 BT_DBG("%s %x", hdev->name, auth);
281
282 /* Authentication */
283 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
284 }
285
286 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
287 {
288 __u8 encrypt = opt;
289
290 BT_DBG("%s %x", hdev->name, encrypt);
291
292 /* Encryption */
293 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
294 }
295
296 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
297 {
298 __le16 policy = cpu_to_le16(opt);
299
300 BT_DBG("%s %x", hdev->name, policy);
301
302 /* Default link policy */
303 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
304 }
305
306 /* Get HCI device by index.
307 * Device is held on return. */
308 struct hci_dev *hci_dev_get(int index)
309 {
310 struct hci_dev *hdev = NULL;
311 struct list_head *p;
312
313 BT_DBG("%d", index);
314
315 if (index < 0)
316 return NULL;
317
318 read_lock(&hci_dev_list_lock);
319 list_for_each(p, &hci_dev_list) {
320 struct hci_dev *d = list_entry(p, struct hci_dev, list);
321 if (d->id == index) {
322 hdev = hci_dev_hold(d);
323 break;
324 }
325 }
326 read_unlock(&hci_dev_list_lock);
327 return hdev;
328 }
329
330 /* ---- Inquiry support ---- */
331 static void inquiry_cache_flush(struct hci_dev *hdev)
332 {
333 struct inquiry_cache *cache = &hdev->inq_cache;
334 struct inquiry_entry *next = cache->list, *e;
335
336 BT_DBG("cache %p", cache);
337
338 cache->list = NULL;
339 while ((e = next)) {
340 next = e->next;
341 kfree(e);
342 }
343 }
344
345 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
346 {
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *e;
349
350 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
351
352 for (e = cache->list; e; e = e->next)
353 if (!bacmp(&e->data.bdaddr, bdaddr))
354 break;
355 return e;
356 }
357
358 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
359 {
360 struct inquiry_cache *cache = &hdev->inq_cache;
361 struct inquiry_entry *ie;
362
363 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
364
365 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
366 if (!ie) {
367 /* Entry not in the cache. Add new one. */
368 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
369 if (!ie)
370 return;
371
372 ie->next = cache->list;
373 cache->list = ie;
374 }
375
376 memcpy(&ie->data, data, sizeof(*data));
377 ie->timestamp = jiffies;
378 cache->timestamp = jiffies;
379 }
380
381 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
382 {
383 struct inquiry_cache *cache = &hdev->inq_cache;
384 struct inquiry_info *info = (struct inquiry_info *) buf;
385 struct inquiry_entry *e;
386 int copied = 0;
387
388 for (e = cache->list; e && copied < num; e = e->next, copied++) {
389 struct inquiry_data *data = &e->data;
390 bacpy(&info->bdaddr, &data->bdaddr);
391 info->pscan_rep_mode = data->pscan_rep_mode;
392 info->pscan_period_mode = data->pscan_period_mode;
393 info->pscan_mode = data->pscan_mode;
394 memcpy(info->dev_class, data->dev_class, 3);
395 info->clock_offset = data->clock_offset;
396 info++;
397 }
398
399 BT_DBG("cache %p, copied %d", cache, copied);
400 return copied;
401 }
402
403 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
404 {
405 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
406 struct hci_cp_inquiry cp;
407
408 BT_DBG("%s", hdev->name);
409
410 if (test_bit(HCI_INQUIRY, &hdev->flags))
411 return;
412
413 /* Start Inquiry */
414 memcpy(&cp.lap, &ir->lap, 3);
415 cp.length = ir->length;
416 cp.num_rsp = ir->num_rsp;
417 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
418 }
419
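/* Run an inquiry on behalf of user space: refresh the inquiry cache with a
 * new inquiry if it is empty, too old or a flush was requested, then copy
 * the cached responses back into the caller's buffer. */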
420 int hci_inquiry(void __user *arg)
421 {
422 __u8 __user *ptr = arg;
423 struct hci_inquiry_req ir;
424 struct hci_dev *hdev;
425 int err = 0, do_inquiry = 0, max_rsp;
426 long timeo;
427 __u8 *buf;
428
429 if (copy_from_user(&ir, ptr, sizeof(ir)))
430 return -EFAULT;
431
432 if (!(hdev = hci_dev_get(ir.dev_id)))
433 return -ENODEV;
434
435 hci_dev_lock_bh(hdev);
436 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
437 inquiry_cache_empty(hdev) ||
438 ir.flags & IREQ_CACHE_FLUSH) {
439 inquiry_cache_flush(hdev);
440 do_inquiry = 1;
441 }
442 hci_dev_unlock_bh(hdev);
443
444 timeo = ir.length * msecs_to_jiffies(2000);
445
446 if (do_inquiry) {
447 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
448 if (err < 0)
449 goto done;
450 }
451
452 /* for an unlimited number of responses, use a buffer with 255 entries */
453 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
454
455 /* cache_dump can't sleep, so allocate a temporary buffer and then
456 * copy it to user space.
457 */
458 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
459 if (!buf) {
460 err = -ENOMEM;
461 goto done;
462 }
463
464 hci_dev_lock_bh(hdev);
465 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
466 hci_dev_unlock_bh(hdev);
467
468 BT_DBG("num_rsp %d", ir.num_rsp);
469
470 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
471 ptr += sizeof(ir);
472 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
473 ir.num_rsp))
474 err = -EFAULT;
475 } else
476 err = -EFAULT;
477
478 kfree(buf);
479
480 done:
481 hci_dev_put(hdev);
482 return err;
483 }
484
485 /* ---- HCI ioctl helpers ---- */
486
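/* Bring a controller up: call the driver's open(), run the HCI init sequence
 * (hci_init_req) unless the device is marked HCI_RAW, and on success set
 * HCI_UP and send the HCI_DEV_UP notification. On init failure everything is
 * torn down again and the driver is closed. */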
487 int hci_dev_open(__u16 dev)
488 {
489 struct hci_dev *hdev;
490 int ret = 0;
491
492 if (!(hdev = hci_dev_get(dev)))
493 return -ENODEV;
494
495 BT_DBG("%s %p", hdev->name, hdev);
496
497 hci_req_lock(hdev);
498
499 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
500 ret = -ERFKILL;
501 goto done;
502 }
503
504 if (test_bit(HCI_UP, &hdev->flags)) {
505 ret = -EALREADY;
506 goto done;
507 }
508
509 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
510 set_bit(HCI_RAW, &hdev->flags);
511
512 /* Treat all non-BR/EDR controllers as raw devices for now */
513 if (hdev->dev_type != HCI_BREDR)
514 set_bit(HCI_RAW, &hdev->flags);
515
516 if (hdev->open(hdev)) {
517 ret = -EIO;
518 goto done;
519 }
520
521 if (!test_bit(HCI_RAW, &hdev->flags)) {
522 atomic_set(&hdev->cmd_cnt, 1);
523 set_bit(HCI_INIT, &hdev->flags);
524 hdev->init_last_cmd = 0;
525
526 //__hci_request(hdev, hci_reset_req, 0, HZ);
527 ret = __hci_request(hdev, hci_init_req, 0,
528 msecs_to_jiffies(HCI_INIT_TIMEOUT));
529
530 clear_bit(HCI_INIT, &hdev->flags);
531 }
532
533 if (!ret) {
534 hci_dev_hold(hdev);
535 set_bit(HCI_UP, &hdev->flags);
536 hci_notify(hdev, HCI_DEV_UP);
537 if (!test_bit(HCI_SETUP, &hdev->flags))
538 mgmt_powered(hdev->id, 1);
539 } else {
540 /* Init failed, cleanup */
541 tasklet_kill(&hdev->rx_task);
542 tasklet_kill(&hdev->tx_task);
543 tasklet_kill(&hdev->cmd_task);
544
545 skb_queue_purge(&hdev->cmd_q);
546 skb_queue_purge(&hdev->rx_q);
547
548 if (hdev->flush)
549 hdev->flush(hdev);
550
551 if (hdev->sent_cmd) {
552 kfree_skb(hdev->sent_cmd);
553 hdev->sent_cmd = NULL;
554 }
555
556 hdev->close(hdev);
557 hdev->flags = 0;
558 }
559
560 done:
561 hci_req_unlock(hdev);
562 hci_dev_put(hdev);
563 return ret;
564 }
565
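/* Common shutdown path used by hci_dev_close(), rfkill and unregister:
 * cancel any pending request, stop the RX/TX tasklets, flush the inquiry
 * cache and connection hash, reset the controller (unless HCI_RAW), drop all
 * queues and finally call the driver's close(). */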
566 static int hci_dev_do_close(struct hci_dev *hdev)
567 {
568 BT_DBG("%s %p", hdev->name, hdev);
569
570 hci_req_cancel(hdev, ENODEV);
571 hci_req_lock(hdev);
572
573 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
574 hci_req_unlock(hdev);
575 return 0;
576 }
577
578 /* Kill RX and TX tasks */
579 tasklet_kill(&hdev->rx_task);
580 tasklet_kill(&hdev->tx_task);
581
582 hci_dev_lock_bh(hdev);
583 inquiry_cache_flush(hdev);
584 hci_conn_hash_flush(hdev);
585 hci_dev_unlock_bh(hdev);
586
587 hci_notify(hdev, HCI_DEV_DOWN);
588
589 if (hdev->flush)
590 hdev->flush(hdev);
591
592 /* Reset device */
593 skb_queue_purge(&hdev->cmd_q);
594 atomic_set(&hdev->cmd_cnt, 1);
595 if (!test_bit(HCI_RAW, &hdev->flags)) {
596 set_bit(HCI_INIT, &hdev->flags);
597 __hci_request(hdev, hci_reset_req, 0,
598 msecs_to_jiffies(250));
599 clear_bit(HCI_INIT, &hdev->flags);
600 }
601
602 /* Kill cmd task */
603 tasklet_kill(&hdev->cmd_task);
604
605 /* Drop queues */
606 skb_queue_purge(&hdev->rx_q);
607 skb_queue_purge(&hdev->cmd_q);
608 skb_queue_purge(&hdev->raw_q);
609
610 /* Drop last sent command */
611 if (hdev->sent_cmd) {
612 kfree_skb(hdev->sent_cmd);
613 hdev->sent_cmd = NULL;
614 }
615
616 /* After this point our queues are empty
617 * and no tasks are scheduled. */
618 hdev->close(hdev);
619
620 mgmt_powered(hdev->id, 0);
621
622 /* Clear flags */
623 hdev->flags = 0;
624
625 hci_req_unlock(hdev);
626
627 hci_dev_put(hdev);
628 return 0;
629 }
630
631 int hci_dev_close(__u16 dev)
632 {
633 struct hci_dev *hdev;
634 int err;
635
636 hdev = hci_dev_get(dev);
637 if (!hdev)
638 return -ENODEV;
639 err = hci_dev_do_close(hdev);
640 hci_dev_put(hdev);
641 return err;
642 }
643
644 int hci_dev_reset(__u16 dev)
645 {
646 struct hci_dev *hdev;
647 int ret = 0;
648
649 hdev = hci_dev_get(dev);
650 if (!hdev)
651 return -ENODEV;
652
653 hci_req_lock(hdev);
654 tasklet_disable(&hdev->tx_task);
655
656 if (!test_bit(HCI_UP, &hdev->flags))
657 goto done;
658
659 /* Drop queues */
660 skb_queue_purge(&hdev->rx_q);
661 skb_queue_purge(&hdev->cmd_q);
662
663 hci_dev_lock_bh(hdev);
664 inquiry_cache_flush(hdev);
665 hci_conn_hash_flush(hdev);
666 hci_dev_unlock_bh(hdev);
667
668 if (hdev->flush)
669 hdev->flush(hdev);
670
671 atomic_set(&hdev->cmd_cnt, 1);
672 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
673
674 if (!test_bit(HCI_RAW, &hdev->flags))
675 ret = __hci_request(hdev, hci_reset_req, 0,
676 msecs_to_jiffies(HCI_INIT_TIMEOUT));
677
678 done:
679 tasklet_enable(&hdev->tx_task);
680 hci_req_unlock(hdev);
681 hci_dev_put(hdev);
682 return ret;
683 }
684
685 int hci_dev_reset_stat(__u16 dev)
686 {
687 struct hci_dev *hdev;
688 int ret = 0;
689
690 hdev = hci_dev_get(dev);
691 if (!hdev)
692 return -ENODEV;
693
694 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
695
696 hci_dev_put(hdev);
697
698 return ret;
699 }
700
701 int hci_dev_cmd(unsigned int cmd, void __user *arg)
702 {
703 struct hci_dev *hdev;
704 struct hci_dev_req dr;
705 int err = 0;
706
707 if (copy_from_user(&dr, arg, sizeof(dr)))
708 return -EFAULT;
709
710 hdev = hci_dev_get(dr.dev_id);
711 if (!hdev)
712 return -ENODEV;
713
714 switch (cmd) {
715 case HCISETAUTH:
716 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
717 msecs_to_jiffies(HCI_INIT_TIMEOUT));
718 break;
719
720 case HCISETENCRYPT:
721 if (!lmp_encrypt_capable(hdev)) {
722 err = -EOPNOTSUPP;
723 break;
724 }
725
726 if (!test_bit(HCI_AUTH, &hdev->flags)) {
727 /* Auth must be enabled first */
728 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
729 msecs_to_jiffies(HCI_INIT_TIMEOUT));
730 if (err)
731 break;
732 }
733
734 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
735 msecs_to_jiffies(HCI_INIT_TIMEOUT));
736 break;
737
738 case HCISETSCAN:
739 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
740 msecs_to_jiffies(HCI_INIT_TIMEOUT));
741 break;
742
743 case HCISETLINKPOL:
744 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
745 msecs_to_jiffies(HCI_INIT_TIMEOUT));
746 break;
747
748 case HCISETLINKMODE:
749 hdev->link_mode = ((__u16) dr.dev_opt) &
750 (HCI_LM_MASTER | HCI_LM_ACCEPT);
751 break;
752
753 case HCISETPTYPE:
754 hdev->pkt_type = (__u16) dr.dev_opt;
755 break;
756
757 case HCISETACLMTU:
758 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
759 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
760 break;
761
762 case HCISETSCOMTU:
763 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
764 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
765 break;
766
767 default:
768 err = -EINVAL;
769 break;
770 }
771
772 hci_dev_put(hdev);
773 return err;
774 }
775
776 int hci_get_dev_list(void __user *arg)
777 {
778 struct hci_dev_list_req *dl;
779 struct hci_dev_req *dr;
780 struct list_head *p;
781 int n = 0, size, err;
782 __u16 dev_num;
783
784 if (get_user(dev_num, (__u16 __user *) arg))
785 return -EFAULT;
786
787 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
788 return -EINVAL;
789
790 size = sizeof(*dl) + dev_num * sizeof(*dr);
791
792 dl = kzalloc(size, GFP_KERNEL);
793 if (!dl)
794 return -ENOMEM;
795
796 dr = dl->dev_req;
797
798 read_lock_bh(&hci_dev_list_lock);
799 list_for_each(p, &hci_dev_list) {
800 struct hci_dev *hdev;
801
802 hdev = list_entry(p, struct hci_dev, list);
803
804 hci_del_off_timer(hdev);
805
806 if (!test_bit(HCI_MGMT, &hdev->flags))
807 set_bit(HCI_PAIRABLE, &hdev->flags);
808
809 (dr + n)->dev_id = hdev->id;
810 (dr + n)->dev_opt = hdev->flags;
811
812 if (++n >= dev_num)
813 break;
814 }
815 read_unlock_bh(&hci_dev_list_lock);
816
817 dl->dev_num = n;
818 size = sizeof(*dl) + n * sizeof(*dr);
819
820 err = copy_to_user(arg, dl, size);
821 kfree(dl);
822
823 return err ? -EFAULT : 0;
824 }
825
826 int hci_get_dev_info(void __user *arg)
827 {
828 struct hci_dev *hdev;
829 struct hci_dev_info di;
830 int err = 0;
831
832 if (copy_from_user(&di, arg, sizeof(di)))
833 return -EFAULT;
834
835 hdev = hci_dev_get(di.dev_id);
836 if (!hdev)
837 return -ENODEV;
838
839 hci_del_off_timer(hdev);
840
841 if (!test_bit(HCI_MGMT, &hdev->flags))
842 set_bit(HCI_PAIRABLE, &hdev->flags);
843
844 strcpy(di.name, hdev->name);
845 di.bdaddr = hdev->bdaddr;
846 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
847 di.flags = hdev->flags;
848 di.pkt_type = hdev->pkt_type;
849 di.acl_mtu = hdev->acl_mtu;
850 di.acl_pkts = hdev->acl_pkts;
851 di.sco_mtu = hdev->sco_mtu;
852 di.sco_pkts = hdev->sco_pkts;
853 di.link_policy = hdev->link_policy;
854 di.link_mode = hdev->link_mode;
855
856 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
857 memcpy(&di.features, &hdev->features, sizeof(di.features));
858
859 if (copy_to_user(arg, &di, sizeof(di)))
860 err = -EFAULT;
861
862 hci_dev_put(hdev);
863
864 return err;
865 }
866
867 /* ---- Interface to HCI drivers ---- */
868
869 static int hci_rfkill_set_block(void *data, bool blocked)
870 {
871 struct hci_dev *hdev = data;
872
873 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
874
875 if (!blocked)
876 return 0;
877
878 hci_dev_do_close(hdev);
879
880 return 0;
881 }
882
883 static const struct rfkill_ops hci_rfkill_ops = {
884 .set_block = hci_rfkill_set_block,
885 };
886
887 /* Alloc HCI device */
888 struct hci_dev *hci_alloc_dev(void)
889 {
890 struct hci_dev *hdev;
891
892 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
893 if (!hdev)
894 return NULL;
895
896 skb_queue_head_init(&hdev->driver_init);
897
898 return hdev;
899 }
900 EXPORT_SYMBOL(hci_alloc_dev);
901
902 /* Free HCI device */
903 void hci_free_dev(struct hci_dev *hdev)
904 {
905 skb_queue_purge(&hdev->driver_init);
906
907 /* will be freed via the device release callback */
908 put_device(&hdev->dev);
909 }
910 EXPORT_SYMBOL(hci_free_dev);
911
912 static void hci_power_on(struct work_struct *work)
913 {
914 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
915
916 BT_DBG("%s", hdev->name);
917
918 if (hci_dev_open(hdev->id) < 0)
919 return;
920
921 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
922 mod_timer(&hdev->off_timer,
923 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
924
925 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
926 mgmt_index_added(hdev->id);
927 }
928
929 static void hci_power_off(struct work_struct *work)
930 {
931 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
932
933 BT_DBG("%s", hdev->name);
934
935 hci_dev_close(hdev->id);
936 }
937
938 static void hci_auto_off(unsigned long data)
939 {
940 struct hci_dev *hdev = (struct hci_dev *) data;
941
942 BT_DBG("%s", hdev->name);
943
944 clear_bit(HCI_AUTO_OFF, &hdev->flags);
945
946 queue_work(hdev->workqueue, &hdev->power_off);
947 }
948
949 void hci_del_off_timer(struct hci_dev *hdev)
950 {
951 BT_DBG("%s", hdev->name);
952
953 clear_bit(HCI_AUTO_OFF, &hdev->flags);
954 del_timer(&hdev->off_timer);
955 }
956
957 int hci_uuids_clear(struct hci_dev *hdev)
958 {
959 struct list_head *p, *n;
960
961 list_for_each_safe(p, n, &hdev->uuids) {
962 struct bt_uuid *uuid;
963
964 uuid = list_entry(p, struct bt_uuid, list);
965
966 list_del(p);
967 kfree(uuid);
968 }
969
970 return 0;
971 }
972
973 int hci_link_keys_clear(struct hci_dev *hdev)
974 {
975 struct list_head *p, *n;
976
977 list_for_each_safe(p, n, &hdev->link_keys) {
978 struct link_key *key;
979
980 key = list_entry(p, struct link_key, list);
981
982 list_del(p);
983 kfree(key);
984 }
985
986 return 0;
987 }
988
989 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
990 {
991 struct list_head *p;
992
993 list_for_each(p, &hdev->link_keys) {
994 struct link_key *k;
995
996 k = list_entry(p, struct link_key, list);
997
998 if (bacmp(bdaddr, &k->bdaddr) == 0)
999 return k;
1000 }
1001
1002 return NULL;
1003 }
1004
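/* Store a link key for bdaddr, reusing an existing entry if there is one.
 * New keys are reported to the management interface; when the controller
 * reports a changed combination key (type 0x06) the previously stored key
 * type is kept. */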
1005 int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1006 u8 *val, u8 type, u8 pin_len)
1007 {
1008 struct link_key *key, *old_key;
1009 u8 old_key_type;
1010
1011 old_key = hci_find_link_key(hdev, bdaddr);
1012 if (old_key) {
1013 old_key_type = old_key->type;
1014 key = old_key;
1015 } else {
1016 old_key_type = 0xff;
1017 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1018 if (!key)
1019 return -ENOMEM;
1020 list_add(&key->list, &hdev->link_keys);
1021 }
1022
1023 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1024
1025 bacpy(&key->bdaddr, bdaddr);
1026 memcpy(key->val, val, 16);
1027 key->type = type;
1028 key->pin_len = pin_len;
1029
1030 if (new_key)
1031 mgmt_new_key(hdev->id, key, old_key_type);
1032
1033 if (type == 0x06)
1034 key->type = old_key_type;
1035
1036 return 0;
1037 }
1038
1039 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1040 {
1041 struct link_key *key;
1042
1043 key = hci_find_link_key(hdev, bdaddr);
1044 if (!key)
1045 return -ENOENT;
1046
1047 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1048
1049 list_del(&key->list);
1050 kfree(key);
1051
1052 return 0;
1053 }
1054
1055 /* Register HCI device */
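/* Picks the lowest free hciN id, initialises the tasklets, queues, timers
 * and per-device state, creates the workqueue and sysfs entry, hooks the
 * device up to rfkill and finally schedules the power_on work, which
 * auto-opens the controller (and powers it back off after AUTO_OFF_TIMEOUT
 * if nothing claims it). */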
1056 int hci_register_dev(struct hci_dev *hdev)
1057 {
1058 struct list_head *head = &hci_dev_list, *p;
1059 int i, id = 0;
1060
1061 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1062 hdev->bus, hdev->owner);
1063
1064 if (!hdev->open || !hdev->close || !hdev->destruct)
1065 return -EINVAL;
1066
1067 write_lock_bh(&hci_dev_list_lock);
1068
1069 /* Find first available device id */
1070 list_for_each(p, &hci_dev_list) {
1071 if (list_entry(p, struct hci_dev, list)->id != id)
1072 break;
1073 head = p; id++;
1074 }
1075
1076 sprintf(hdev->name, "hci%d", id);
1077 hdev->id = id;
1078 list_add(&hdev->list, head);
1079
1080 atomic_set(&hdev->refcnt, 1);
1081 spin_lock_init(&hdev->lock);
1082
1083 hdev->flags = 0;
1084 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1085 hdev->esco_type = (ESCO_HV1);
1086 hdev->link_mode = (HCI_LM_ACCEPT);
1087
1088 hdev->idle_timeout = 0;
1089 hdev->sniff_max_interval = 800;
1090 hdev->sniff_min_interval = 80;
1091
1092 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1093 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1094 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1095
1096 skb_queue_head_init(&hdev->rx_q);
1097 skb_queue_head_init(&hdev->cmd_q);
1098 skb_queue_head_init(&hdev->raw_q);
1099
1100 for (i = 0; i < NUM_REASSEMBLY; i++)
1101 hdev->reassembly[i] = NULL;
1102
1103 init_waitqueue_head(&hdev->req_wait_q);
1104 mutex_init(&hdev->req_lock);
1105
1106 inquiry_cache_init(hdev);
1107
1108 hci_conn_hash_init(hdev);
1109
1110 INIT_LIST_HEAD(&hdev->blacklist);
1111
1112 INIT_LIST_HEAD(&hdev->uuids);
1113
1114 INIT_LIST_HEAD(&hdev->link_keys);
1115
1116 INIT_WORK(&hdev->power_on, hci_power_on);
1117 INIT_WORK(&hdev->power_off, hci_power_off);
1118 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1119
1120 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1121
1122 atomic_set(&hdev->promisc, 0);
1123
1124 write_unlock_bh(&hci_dev_list_lock);
1125
1126 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1127 if (!hdev->workqueue)
1128 goto nomem;
1129
1130 hci_register_sysfs(hdev);
1131
1132 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1133 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1134 if (hdev->rfkill) {
1135 if (rfkill_register(hdev->rfkill) < 0) {
1136 rfkill_destroy(hdev->rfkill);
1137 hdev->rfkill = NULL;
1138 }
1139 }
1140
1141 set_bit(HCI_AUTO_OFF, &hdev->flags);
1142 set_bit(HCI_SETUP, &hdev->flags);
1143 queue_work(hdev->workqueue, &hdev->power_on);
1144
1145 hci_notify(hdev, HCI_DEV_REG);
1146
1147 return id;
1148
1149 nomem:
1150 write_lock_bh(&hci_dev_list_lock);
1151 list_del(&hdev->list);
1152 write_unlock_bh(&hci_dev_list_lock);
1153
1154 return -ENOMEM;
1155 }
1156 EXPORT_SYMBOL(hci_register_dev);
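/*
 * Minimal driver-side registration sketch (illustrative only, not part of
 * this file; the my_* callbacks are hypothetical):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */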
1157
1158 /* Unregister HCI device */
1159 int hci_unregister_dev(struct hci_dev *hdev)
1160 {
1161 int i;
1162
1163 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1164
1165 write_lock_bh(&hci_dev_list_lock);
1166 list_del(&hdev->list);
1167 write_unlock_bh(&hci_dev_list_lock);
1168
1169 hci_dev_do_close(hdev);
1170
1171 for (i = 0; i < NUM_REASSEMBLY; i++)
1172 kfree_skb(hdev->reassembly[i]);
1173
1174 if (!test_bit(HCI_INIT, &hdev->flags) &&
1175 !test_bit(HCI_SETUP, &hdev->flags))
1176 mgmt_index_removed(hdev->id);
1177
1178 hci_notify(hdev, HCI_DEV_UNREG);
1179
1180 if (hdev->rfkill) {
1181 rfkill_unregister(hdev->rfkill);
1182 rfkill_destroy(hdev->rfkill);
1183 }
1184
1185 hci_unregister_sysfs(hdev);
1186
1187 destroy_workqueue(hdev->workqueue);
1188
1189 hci_dev_lock_bh(hdev);
1190 hci_blacklist_clear(hdev);
1191 hci_uuids_clear(hdev);
1192 hci_link_keys_clear(hdev);
1193 hci_dev_unlock_bh(hdev);
1194
1195 __hci_dev_put(hdev);
1196
1197 return 0;
1198 }
1199 EXPORT_SYMBOL(hci_unregister_dev);
1200
1201 /* Suspend HCI device */
1202 int hci_suspend_dev(struct hci_dev *hdev)
1203 {
1204 hci_notify(hdev, HCI_DEV_SUSPEND);
1205 return 0;
1206 }
1207 EXPORT_SYMBOL(hci_suspend_dev);
1208
1209 /* Resume HCI device */
1210 int hci_resume_dev(struct hci_dev *hdev)
1211 {
1212 hci_notify(hdev, HCI_DEV_RESUME);
1213 return 0;
1214 }
1215 EXPORT_SYMBOL(hci_resume_dev);
1216
1217 /* Receive frame from HCI drivers */
1218 int hci_recv_frame(struct sk_buff *skb)
1219 {
1220 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1221 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1222 && !test_bit(HCI_INIT, &hdev->flags))) {
1223 kfree_skb(skb);
1224 return -ENXIO;
1225 }
1226
1227 /* Incoming skb */
1228 bt_cb(skb)->incoming = 1;
1229
1230 /* Time stamp */
1231 __net_timestamp(skb);
1232
1233 /* Queue frame for rx task */
1234 skb_queue_tail(&hdev->rx_q, skb);
1235 tasklet_schedule(&hdev->rx_task);
1236
1237 return 0;
1238 }
1239 EXPORT_SYMBOL(hci_recv_frame);
1240
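/* Reassemble one (possibly fragmented) HCI packet of the given type into
 * hdev->reassembly[index]. The first fragment allocates an skb sized for the
 * packet type; bt_skb_cb->expect tracks how many bytes are still missing
 * (first the header, then the payload length taken from it). A completed
 * frame is handed to hci_recv_frame(); the return value is the number of
 * input bytes left over, or a negative error. */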
1241 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1242 int count, __u8 index, gfp_t gfp_mask)
1243 {
1244 int len = 0;
1245 int hlen = 0;
1246 int remain = count;
1247 struct sk_buff *skb;
1248 struct bt_skb_cb *scb;
1249
1250 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1251 index >= NUM_REASSEMBLY)
1252 return -EILSEQ;
1253
1254 skb = hdev->reassembly[index];
1255
1256 if (!skb) {
1257 switch (type) {
1258 case HCI_ACLDATA_PKT:
1259 len = HCI_MAX_FRAME_SIZE;
1260 hlen = HCI_ACL_HDR_SIZE;
1261 break;
1262 case HCI_EVENT_PKT:
1263 len = HCI_MAX_EVENT_SIZE;
1264 hlen = HCI_EVENT_HDR_SIZE;
1265 break;
1266 case HCI_SCODATA_PKT:
1267 len = HCI_MAX_SCO_SIZE;
1268 hlen = HCI_SCO_HDR_SIZE;
1269 break;
1270 }
1271
1272 skb = bt_skb_alloc(len, gfp_mask);
1273 if (!skb)
1274 return -ENOMEM;
1275
1276 scb = (void *) skb->cb;
1277 scb->expect = hlen;
1278 scb->pkt_type = type;
1279
1280 skb->dev = (void *) hdev;
1281 hdev->reassembly[index] = skb;
1282 }
1283
1284 while (count) {
1285 scb = (void *) skb->cb;
1286 len = min(scb->expect, (__u16)count);
1287
1288 memcpy(skb_put(skb, len), data, len);
1289
1290 count -= len;
1291 data += len;
1292 scb->expect -= len;
1293 remain = count;
1294
1295 switch (type) {
1296 case HCI_EVENT_PKT:
1297 if (skb->len == HCI_EVENT_HDR_SIZE) {
1298 struct hci_event_hdr *h = hci_event_hdr(skb);
1299 scb->expect = h->plen;
1300
1301 if (skb_tailroom(skb) < scb->expect) {
1302 kfree_skb(skb);
1303 hdev->reassembly[index] = NULL;
1304 return -ENOMEM;
1305 }
1306 }
1307 break;
1308
1309 case HCI_ACLDATA_PKT:
1310 if (skb->len == HCI_ACL_HDR_SIZE) {
1311 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1312 scb->expect = __le16_to_cpu(h->dlen);
1313
1314 if (skb_tailroom(skb) < scb->expect) {
1315 kfree_skb(skb);
1316 hdev->reassembly[index] = NULL;
1317 return -ENOMEM;
1318 }
1319 }
1320 break;
1321
1322 case HCI_SCODATA_PKT:
1323 if (skb->len == HCI_SCO_HDR_SIZE) {
1324 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1325 scb->expect = h->dlen;
1326
1327 if (skb_tailroom(skb) < scb->expect) {
1328 kfree_skb(skb);
1329 hdev->reassembly[index] = NULL;
1330 return -ENOMEM;
1331 }
1332 }
1333 break;
1334 }
1335
1336 if (scb->expect == 0) {
1337 /* Complete frame */
1338
1339 bt_cb(skb)->pkt_type = type;
1340 hci_recv_frame(skb);
1341
1342 hdev->reassembly[index] = NULL;
1343 return remain;
1344 }
1345 }
1346
1347 return remain;
1348 }
1349
1350 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1351 {
1352 int rem = 0;
1353
1354 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1355 return -EILSEQ;
1356
1357 while (count) {
1358 rem = hci_reassembly(hdev, type, data, count,
1359 type - 1, GFP_ATOMIC);
1360 if (rem < 0)
1361 return rem;
1362
1363 data += (count - rem);
1364 count = rem;
1365 }
1366
1367 return rem;
1368 }
1369 EXPORT_SYMBOL(hci_recv_fragment);
1370
1371 #define STREAM_REASSEMBLY 0
1372
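/* Variant of hci_recv_fragment() for byte-stream transports: when no
 * reassembly is in progress, the first byte of the stream is taken as the
 * packet type indicator (UART/H4-style framing), after which the data is fed
 * through hci_reassembly() using the single STREAM_REASSEMBLY slot. */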
1373 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1374 {
1375 int type;
1376 int rem = 0;
1377
1378 while (count) {
1379 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1380
1381 if (!skb) {
1382 struct { char type; } *pkt;
1383
1384 /* Start of the frame */
1385 pkt = data;
1386 type = pkt->type;
1387
1388 data++;
1389 count--;
1390 } else
1391 type = bt_cb(skb)->pkt_type;
1392
1393 rem = hci_reassembly(hdev, type, data,
1394 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1395 if (rem < 0)
1396 return rem;
1397
1398 data += (count - rem);
1399 count = rem;
1400 }
1401
1402 return rem;
1403 }
1404 EXPORT_SYMBOL(hci_recv_stream_fragment);
1405
1406 /* ---- Interface to upper protocols ---- */
1407
1408 /* Register/Unregister protocols.
1409 * hci_task_lock is used to ensure that no tasks are running. */
1410 int hci_register_proto(struct hci_proto *hp)
1411 {
1412 int err = 0;
1413
1414 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1415
1416 if (hp->id >= HCI_MAX_PROTO)
1417 return -EINVAL;
1418
1419 write_lock_bh(&hci_task_lock);
1420
1421 if (!hci_proto[hp->id])
1422 hci_proto[hp->id] = hp;
1423 else
1424 err = -EEXIST;
1425
1426 write_unlock_bh(&hci_task_lock);
1427
1428 return err;
1429 }
1430 EXPORT_SYMBOL(hci_register_proto);
1431
1432 int hci_unregister_proto(struct hci_proto *hp)
1433 {
1434 int err = 0;
1435
1436 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1437
1438 if (hp->id >= HCI_MAX_PROTO)
1439 return -EINVAL;
1440
1441 write_lock_bh(&hci_task_lock);
1442
1443 if (hci_proto[hp->id])
1444 hci_proto[hp->id] = NULL;
1445 else
1446 err = -ENOENT;
1447
1448 write_unlock_bh(&hci_task_lock);
1449
1450 return err;
1451 }
1452 EXPORT_SYMBOL(hci_unregister_proto);
1453
1454 int hci_register_cb(struct hci_cb *cb)
1455 {
1456 BT_DBG("%p name %s", cb, cb->name);
1457
1458 write_lock_bh(&hci_cb_list_lock);
1459 list_add(&cb->list, &hci_cb_list);
1460 write_unlock_bh(&hci_cb_list_lock);
1461
1462 return 0;
1463 }
1464 EXPORT_SYMBOL(hci_register_cb);
1465
1466 int hci_unregister_cb(struct hci_cb *cb)
1467 {
1468 BT_DBG("%p name %s", cb, cb->name);
1469
1470 write_lock_bh(&hci_cb_list_lock);
1471 list_del(&cb->list);
1472 write_unlock_bh(&hci_cb_list_lock);
1473
1474 return 0;
1475 }
1476 EXPORT_SYMBOL(hci_unregister_cb);
1477
1478 static int hci_send_frame(struct sk_buff *skb)
1479 {
1480 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1481
1482 if (!hdev) {
1483 kfree_skb(skb);
1484 return -ENODEV;
1485 }
1486
1487 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1488
1489 if (atomic_read(&hdev->promisc)) {
1490 /* Time stamp */
1491 __net_timestamp(skb);
1492
1493 hci_send_to_sock(hdev, skb, NULL);
1494 }
1495
1496 /* Get rid of skb owner, prior to sending to the driver. */
1497 skb_orphan(skb);
1498
1499 return hdev->send(skb);
1500 }
1501
1502 /* Send HCI command */
1503 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1504 {
1505 int len = HCI_COMMAND_HDR_SIZE + plen;
1506 struct hci_command_hdr *hdr;
1507 struct sk_buff *skb;
1508
1509 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1510
1511 skb = bt_skb_alloc(len, GFP_ATOMIC);
1512 if (!skb) {
1513 BT_ERR("%s no memory for command", hdev->name);
1514 return -ENOMEM;
1515 }
1516
1517 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1518 hdr->opcode = cpu_to_le16(opcode);
1519 hdr->plen = plen;
1520
1521 if (plen)
1522 memcpy(skb_put(skb, plen), param, plen);
1523
1524 BT_DBG("skb len %d", skb->len);
1525
1526 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1527 skb->dev = (void *) hdev;
1528
1529 if (test_bit(HCI_INIT, &hdev->flags))
1530 hdev->init_last_cmd = opcode;
1531
1532 skb_queue_tail(&hdev->cmd_q, skb);
1533 tasklet_schedule(&hdev->cmd_task);
1534
1535 return 0;
1536 }
1537
1538 /* Get data from the previously sent command */
1539 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1540 {
1541 struct hci_command_hdr *hdr;
1542
1543 if (!hdev->sent_cmd)
1544 return NULL;
1545
1546 hdr = (void *) hdev->sent_cmd->data;
1547
1548 if (hdr->opcode != cpu_to_le16(opcode))
1549 return NULL;
1550
1551 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1552
1553 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1554 }
1555
1556 /* Send ACL data */
1557 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1558 {
1559 struct hci_acl_hdr *hdr;
1560 int len = skb->len;
1561
1562 skb_push(skb, HCI_ACL_HDR_SIZE);
1563 skb_reset_transport_header(skb);
1564 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1565 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1566 hdr->dlen = cpu_to_le16(len);
1567 }
1568
1569 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1570 {
1571 struct hci_dev *hdev = conn->hdev;
1572 struct sk_buff *list;
1573
1574 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1575
1576 skb->dev = (void *) hdev;
1577 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1578 hci_add_acl_hdr(skb, conn->handle, flags);
1579
1580 list = skb_shinfo(skb)->frag_list;
1581 if (!list) {
1582 /* Non-fragmented */
1583 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1584
1585 skb_queue_tail(&conn->data_q, skb);
1586 } else {
1587 /* Fragmented */
1588 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1589
1590 skb_shinfo(skb)->frag_list = NULL;
1591
1592 /* Queue all fragments atomically */
1593 spin_lock_bh(&conn->data_q.lock);
1594
1595 __skb_queue_tail(&conn->data_q, skb);
1596
1597 flags &= ~ACL_START;
1598 flags |= ACL_CONT;
1599 do {
1600 skb = list; list = list->next;
1601
1602 skb->dev = (void *) hdev;
1603 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1604 hci_add_acl_hdr(skb, conn->handle, flags);
1605
1606 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1607
1608 __skb_queue_tail(&conn->data_q, skb);
1609 } while (list);
1610
1611 spin_unlock_bh(&conn->data_q.lock);
1612 }
1613
1614 tasklet_schedule(&hdev->tx_task);
1615 }
1616 EXPORT_SYMBOL(hci_send_acl);
1617
1618 /* Send SCO data */
1619 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1620 {
1621 struct hci_dev *hdev = conn->hdev;
1622 struct hci_sco_hdr hdr;
1623
1624 BT_DBG("%s len %d", hdev->name, skb->len);
1625
1626 hdr.handle = cpu_to_le16(conn->handle);
1627 hdr.dlen = skb->len;
1628
1629 skb_push(skb, HCI_SCO_HDR_SIZE);
1630 skb_reset_transport_header(skb);
1631 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1632
1633 skb->dev = (void *) hdev;
1634 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1635
1636 skb_queue_tail(&conn->data_q, skb);
1637 tasklet_schedule(&hdev->tx_task);
1638 }
1639 EXPORT_SYMBOL(hci_send_sco);
1640
1641 /* ---- HCI TX task (outgoing data) ---- */
1642
1643 /* HCI Connection scheduler */
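/* Pick the connection of the given link type that has queued data and the
 * fewest packets already in flight (c->sent), and derive a fair per-round
 * quota from the number of free controller buffers of that type. */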
1644 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1645 {
1646 struct hci_conn_hash *h = &hdev->conn_hash;
1647 struct hci_conn *conn = NULL;
1648 int num = 0, min = ~0;
1649 struct list_head *p;
1650
1651 /* We don't have to lock device here. Connections are always
1652 * added and removed with TX task disabled. */
1653 list_for_each(p, &h->list) {
1654 struct hci_conn *c;
1655 c = list_entry(p, struct hci_conn, list);
1656
1657 if (c->type != type || skb_queue_empty(&c->data_q))
1658 continue;
1659
1660 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1661 continue;
1662
1663 num++;
1664
1665 if (c->sent < min) {
1666 min = c->sent;
1667 conn = c;
1668 }
1669 }
1670
1671 if (conn) {
1672 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1673 int q = cnt / num;
1674 *quote = q ? q : 1;
1675 } else
1676 *quote = 0;
1677
1678 BT_DBG("conn %p quote %d", conn, *quote);
1679 return conn;
1680 }
1681
1682 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1683 {
1684 struct hci_conn_hash *h = &hdev->conn_hash;
1685 struct list_head *p;
1686 struct hci_conn *c;
1687
1688 BT_ERR("%s ACL tx timeout", hdev->name);
1689
1690 /* Kill stalled connections */
1691 list_for_each(p, &h->list) {
1692 c = list_entry(p, struct hci_conn, list);
1693 if (c->type == ACL_LINK && c->sent) {
1694 BT_ERR("%s killing stalled ACL connection %s",
1695 hdev->name, batostr(&c->dst));
1696 hci_acl_disconn(c, 0x13);
1697 }
1698 }
1699 }
1700
1701 static inline void hci_sched_acl(struct hci_dev *hdev)
1702 {
1703 struct hci_conn *conn;
1704 struct sk_buff *skb;
1705 int quote;
1706
1707 BT_DBG("%s", hdev->name);
1708
1709 if (!test_bit(HCI_RAW, &hdev->flags)) {
1710 /* ACL tx timeout must be longer than maximum
1711 * link supervision timeout (40.9 seconds) */
1712 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1713 hci_acl_tx_to(hdev);
1714 }
1715
1716 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1717 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1718 BT_DBG("skb %p len %d", skb, skb->len);
1719
1720 hci_conn_enter_active_mode(conn);
1721
1722 hci_send_frame(skb);
1723 hdev->acl_last_tx = jiffies;
1724
1725 hdev->acl_cnt--;
1726 conn->sent++;
1727 }
1728 }
1729 }
1730
1731 /* Schedule SCO */
1732 static inline void hci_sched_sco(struct hci_dev *hdev)
1733 {
1734 struct hci_conn *conn;
1735 struct sk_buff *skb;
1736 int quote;
1737
1738 BT_DBG("%s", hdev->name);
1739
1740 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1741 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1742 BT_DBG("skb %p len %d", skb, skb->len);
1743 hci_send_frame(skb);
1744
1745 conn->sent++;
1746 if (conn->sent == ~0)
1747 conn->sent = 0;
1748 }
1749 }
1750 }
1751
1752 static inline void hci_sched_esco(struct hci_dev *hdev)
1753 {
1754 struct hci_conn *conn;
1755 struct sk_buff *skb;
1756 int quote;
1757
1758 BT_DBG("%s", hdev->name);
1759
1760 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1761 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1762 BT_DBG("skb %p len %d", skb, skb->len);
1763 hci_send_frame(skb);
1764
1765 conn->sent++;
1766 if (conn->sent == ~0)
1767 conn->sent = 0;
1768 }
1769 }
1770 }
1771
1772 static void hci_tx_task(unsigned long arg)
1773 {
1774 struct hci_dev *hdev = (struct hci_dev *) arg;
1775 struct sk_buff *skb;
1776
1777 read_lock(&hci_task_lock);
1778
1779 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1780
1781 /* Schedule queues and send stuff to HCI driver */
1782
1783 hci_sched_acl(hdev);
1784
1785 hci_sched_sco(hdev);
1786
1787 hci_sched_esco(hdev);
1788
1789 /* Send next queued raw (unknown type) packet */
1790 while ((skb = skb_dequeue(&hdev->raw_q)))
1791 hci_send_frame(skb);
1792
1793 read_unlock(&hci_task_lock);
1794 }
1795
1796 /* ----- HCI RX task (incoming data processing) ----- */
1797
1798 /* ACL data packet */
1799 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1800 {
1801 struct hci_acl_hdr *hdr = (void *) skb->data;
1802 struct hci_conn *conn;
1803 __u16 handle, flags;
1804
1805 skb_pull(skb, HCI_ACL_HDR_SIZE);
1806
1807 handle = __le16_to_cpu(hdr->handle);
1808 flags = hci_flags(handle);
1809 handle = hci_handle(handle);
1810
1811 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1812
1813 hdev->stat.acl_rx++;
1814
1815 hci_dev_lock(hdev);
1816 conn = hci_conn_hash_lookup_handle(hdev, handle);
1817 hci_dev_unlock(hdev);
1818
1819 if (conn) {
1820 register struct hci_proto *hp;
1821
1822 hci_conn_enter_active_mode(conn);
1823
1824 /* Send to upper protocol */
1825 hp = hci_proto[HCI_PROTO_L2CAP];
1826 if (hp && hp->recv_acldata) {
1827 hp->recv_acldata(conn, skb, flags);
1828 return;
1829 }
1830 } else {
1831 BT_ERR("%s ACL packet for unknown connection handle %d",
1832 hdev->name, handle);
1833 }
1834
1835 kfree_skb(skb);
1836 }
1837
1838 /* SCO data packet */
1839 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1840 {
1841 struct hci_sco_hdr *hdr = (void *) skb->data;
1842 struct hci_conn *conn;
1843 __u16 handle;
1844
1845 skb_pull(skb, HCI_SCO_HDR_SIZE);
1846
1847 handle = __le16_to_cpu(hdr->handle);
1848
1849 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1850
1851 hdev->stat.sco_rx++;
1852
1853 hci_dev_lock(hdev);
1854 conn = hci_conn_hash_lookup_handle(hdev, handle);
1855 hci_dev_unlock(hdev);
1856
1857 if (conn) {
1858 register struct hci_proto *hp;
1859
1860 /* Send to upper protocol */
1861 hp = hci_proto[HCI_PROTO_SCO];
1862 if (hp && hp->recv_scodata) {
1863 hp->recv_scodata(conn, skb);
1864 return;
1865 }
1866 } else {
1867 BT_ERR("%s SCO packet for unknown connection handle %d",
1868 hdev->name, handle);
1869 }
1870
1871 kfree_skb(skb);
1872 }
1873
1874 static void hci_rx_task(unsigned long arg)
1875 {
1876 struct hci_dev *hdev = (struct hci_dev *) arg;
1877 struct sk_buff *skb;
1878
1879 BT_DBG("%s", hdev->name);
1880
1881 read_lock(&hci_task_lock);
1882
1883 while ((skb = skb_dequeue(&hdev->rx_q))) {
1884 if (atomic_read(&hdev->promisc)) {
1885 /* Send copy to the sockets */
1886 hci_send_to_sock(hdev, skb, NULL);
1887 }
1888
1889 if (test_bit(HCI_RAW, &hdev->flags)) {
1890 kfree_skb(skb);
1891 continue;
1892 }
1893
1894 if (test_bit(HCI_INIT, &hdev->flags)) {
1895 /* Don't process data packets in this state. */
1896 switch (bt_cb(skb)->pkt_type) {
1897 case HCI_ACLDATA_PKT:
1898 case HCI_SCODATA_PKT:
1899 kfree_skb(skb);
1900 continue;
1901 }
1902 }
1903
1904 /* Process frame */
1905 switch (bt_cb(skb)->pkt_type) {
1906 case HCI_EVENT_PKT:
1907 hci_event_packet(hdev, skb);
1908 break;
1909
1910 case HCI_ACLDATA_PKT:
1911 BT_DBG("%s ACL data packet", hdev->name);
1912 hci_acldata_packet(hdev, skb);
1913 break;
1914
1915 case HCI_SCODATA_PKT:
1916 BT_DBG("%s SCO data packet", hdev->name);
1917 hci_scodata_packet(hdev, skb);
1918 break;
1919
1920 default:
1921 kfree_skb(skb);
1922 break;
1923 }
1924 }
1925
1926 read_unlock(&hci_task_lock);
1927 }
1928
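/* Command scheduler: hdev->cmd_cnt holds the number of commands the
 * controller is currently willing to accept. One queued command is sent per
 * run once credit is available, and a clone is kept in hdev->sent_cmd so
 * hci_sent_cmd_data() can refer back to its parameters. If no credit is
 * returned for more than a second the counter is force-reset. */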
1929 static void hci_cmd_task(unsigned long arg)
1930 {
1931 struct hci_dev *hdev = (struct hci_dev *) arg;
1932 struct sk_buff *skb;
1933
1934 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1935
1936 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1937 BT_ERR("%s command tx timeout", hdev->name);
1938 atomic_set(&hdev->cmd_cnt, 1);
1939 }
1940
1941 /* Send queued commands */
1942 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1943 kfree_skb(hdev->sent_cmd);
1944
1945 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1946 if (hdev->sent_cmd) {
1947 atomic_dec(&hdev->cmd_cnt);
1948 hci_send_frame(skb);
1949 hdev->cmd_last_tx = jiffies;
1950 } else {
1951 skb_queue_head(&hdev->cmd_q, skb);
1952 tasklet_schedule(&hdev->cmd_task);
1953 }
1954 }
1955 }