/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
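/*
 * Note on the handshake above: the request callback only *queues* HCI
 * commands; completion is signalled asynchronously when the event
 * handler calls hci_req_complete(), which moves req_status from
 * HCI_REQ_PEND to HCI_REQ_DONE and wakes req_wait_q. The task state is
 * set to TASK_INTERRUPTIBLE *before* req() runs, so a wakeup that
 * arrives between req() and schedule_timeout() is not lost.
 */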
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
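/*
 * Usage sketch (illustrative only, mirroring the ioctl helpers below):
 * run a one-shot request against an UP device and sleep until the
 * matching event completes it or the timeout expires.
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */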
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
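/*
 * The two timeout values above are expressed in baseband slots of
 * 0.625 ms, which is where the "~20 secs" comments come from:
 *
 *	0x8000 slots = 32768 * 0.625 ms ~= 20.5 s  (page timeout)
 *	0x7d00 slots = 32000 * 0.625 ms  = 20.0 s  (connection accept)
 */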
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
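/*
 * The inquiry cache is a simple singly linked list with new entries
 * pushed at the head; lookups are linear. Both the entry and the cache
 * are stamped with jiffies, so inquiry_cache_age() (used by
 * hci_inquiry() below) can decide when the cached results are stale.
 */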
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses we use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
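/*
 * Note on the timeout above: the HCI Inquiry_Length field counts units
 * of 1.28 s, so budgeting 2000 ms of wall time per unit leaves
 * headroom for the Inquiry Complete event to arrive before
 * __hci_request() gives up.
 */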
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
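/*
 * Shutdown ordering matters here: the RX/TX tasklets are killed before
 * the connection hash is flushed (connections are only added and
 * removed with those tasks stopped), the controller gets a best-effort
 * reset with a short 250 ms budget, and only then is the cmd task
 * killed and every queue purged, so close() runs with nothing in
 * flight.
 */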
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
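/*
 * For HCISETACLMTU/HCISETSCOMTU, dev_opt carries two packed 16-bit
 * values: the first __u16 (index 0) is the packet count, the second
 * (index 1) is the MTU. The cast-and-index trick reads them in host
 * memory order, so the packing appears to be host-endian by design of
 * this ioctl.
 */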
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
814/* ---- Interface to HCI drivers ---- */
815
816/* Alloc HCI device */
817struct hci_dev *hci_alloc_dev(void)
818{
819 struct hci_dev *hdev;
820
25ea6db0 821 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
822 if (!hdev)
823 return NULL;
824
1da177e4
LT
825 skb_queue_head_init(&hdev->driver_init);
826
827 return hdev;
828}
829EXPORT_SYMBOL(hci_alloc_dev);
830
831/* Free HCI device */
832void hci_free_dev(struct hci_dev *hdev)
833{
834 skb_queue_purge(&hdev->driver_init);
835
a91f2e39
MH
836 /* will free via device release */
837 put_device(&hdev->dev);
1da177e4
LT
838}
839EXPORT_SYMBOL(hci_free_dev);
840
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
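/*
 * hci_dev_list is kept sorted by id, so the loop above finds the first
 * unused index: it walks entries while ->id matches the running
 * counter and inserts the new device at the first gap (or at the
 * tail). This is why unplugging hci1 and re-plugging it reuses the
 * name "hci1".
 */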
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
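/*
 * __reassembly() relies on the H4 packet type values being contiguous
 * (HCI_ACLDATA_PKT = 0x02, HCI_SCODATA_PKT = 0x03, HCI_EVENT_PKT =
 * 0x04), mapping them onto reassembly[0..2]; the range check at the
 * top of hci_recv_fragment() is what keeps that subtraction safe.
 * A UART-style driver would typically call it once per received
 * chunk, after parsing the packet type indicator itself, e.g.:
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 */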
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
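/*
 * hci_send_cmd() only queues: the skb lands on cmd_q and
 * hci_sched_cmd() kicks the cmd tasklet, which sends it once a command
 * credit is available (see hci_cmd_task() at the end of this file).
 * A typical caller builds the parameter block on the stack, as
 * hci_inq_req() does above:
 *
 *	struct hci_cp_inquiry cp;
 *	memcpy(&cp.lap, &ir->lap, 3);
 *	cp.length  = ir->length;
 *	cp.num_rsp = ir->num_rsp;
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 */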
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
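/*
 * Fragmentation detail: a PDU larger than the ACL MTU arrives here as
 * one skb whose extra fragments hang off frag_list. The head gets the
 * ACL_START packet boundary flag, every detached fragment gets
 * ACL_CONT, and the whole chain is queued under data_q.lock so the TX
 * scheduler can never interleave another frame mid-PDU.
 */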
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
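/*
 * Scheduling policy: among ready connections of the requested type,
 * pick the one with the fewest in-flight packets (lowest ->sent) and
 * grant it an equal share of the controller's free buffers,
 * quote = cnt / num, bumped to 1 when the share rounds down to zero so
 * a busy link is never starved entirely.
 */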
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
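/*
 * cmd_cnt is the HCI command flow-control credit: it is decremented
 * here for every command handed to the driver and replenished when the
 * event handler (hci_event.c) processes the controller's Command
 * Complete/Status events. sent_cmd keeps a clone of the in-flight
 * command so hci_sent_cmd_data() can hand its parameters back to the
 * event code; and if the controller stops answering for a second, the
 * credit is forced back to 1 so the queue cannot wedge forever.
 */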