2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
46 #include <asm/system.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
53 #define AUTO_OFF_TIMEOUT 2000
55 static void hci_cmd_task(unsigned long arg
);
56 static void hci_rx_task(unsigned long arg
);
57 static void hci_tx_task(unsigned long arg
);
58 static void hci_notify(struct hci_dev
*hdev
, int event
);
60 static DEFINE_RWLOCK(hci_task_lock
);
63 LIST_HEAD(hci_dev_list
);
64 DEFINE_RWLOCK(hci_dev_list_lock
);
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list
);
68 DEFINE_RWLOCK(hci_cb_list_lock
);
71 #define HCI_MAX_PROTO 2
72 struct hci_proto
*hci_proto
[HCI_MAX_PROTO
];
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier
);
77 /* ---- HCI notifications ---- */
79 int hci_register_notifier(struct notifier_block
*nb
)
81 return atomic_notifier_chain_register(&hci_notifier
, nb
);
84 int hci_unregister_notifier(struct notifier_block
*nb
)
86 return atomic_notifier_chain_unregister(&hci_notifier
, nb
);
89 static void hci_notify(struct hci_dev
*hdev
, int event
)
91 atomic_notifier_call_chain(&hci_notifier
, event
, hdev
);
94 /* ---- HCI requests ---- */
96 void hci_req_complete(struct hci_dev
*hdev
, __u16 cmd
, int result
)
98 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev
->name
, cmd
, result
);
100 /* If the request has set req_last_cmd (typical for multi-HCI
101 * command requests) check if the completed command matches
102 * this, and if not just return. Single HCI command requests
103 * typically leave req_last_cmd as 0 */
104 if (hdev
->req_last_cmd
&& cmd
!= hdev
->req_last_cmd
)
107 if (hdev
->req_status
== HCI_REQ_PEND
) {
108 hdev
->req_result
= result
;
109 hdev
->req_status
= HCI_REQ_DONE
;
110 wake_up_interruptible(&hdev
->req_wait_q
);
114 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
116 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
118 if (hdev
->req_status
== HCI_REQ_PEND
) {
119 hdev
->req_result
= err
;
120 hdev
->req_status
= HCI_REQ_CANCELED
;
121 wake_up_interruptible(&hdev
->req_wait_q
);
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
127 unsigned long opt
, __u32 timeout
)
129 DECLARE_WAITQUEUE(wait
, current
);
132 BT_DBG("%s start", hdev
->name
);
134 hdev
->req_status
= HCI_REQ_PEND
;
136 add_wait_queue(&hdev
->req_wait_q
, &wait
);
137 set_current_state(TASK_INTERRUPTIBLE
);
140 schedule_timeout(timeout
);
142 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
144 if (signal_pending(current
))
147 switch (hdev
->req_status
) {
149 err
= -bt_err(hdev
->req_result
);
152 case HCI_REQ_CANCELED
:
153 err
= -hdev
->req_result
;
161 hdev
->req_last_cmd
= hdev
->req_status
= hdev
->req_result
= 0;
163 BT_DBG("%s end: err %d", hdev
->name
, err
);
168 static inline int hci_request(struct hci_dev
*hdev
, void (*req
)(struct hci_dev
*hdev
, unsigned long opt
),
169 unsigned long opt
, __u32 timeout
)
173 if (!test_bit(HCI_UP
, &hdev
->flags
))
176 /* Serialize all requests */
178 ret
= __hci_request(hdev
, req
, opt
, timeout
);
179 hci_req_unlock(hdev
);
184 static void hci_reset_req(struct hci_dev
*hdev
, unsigned long opt
)
186 BT_DBG("%s %ld", hdev
->name
, opt
);
189 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
192 static void hci_init_req(struct hci_dev
*hdev
, unsigned long opt
)
198 BT_DBG("%s %ld", hdev
->name
, opt
);
200 /* Driver initialization */
202 /* Special commands */
203 while ((skb
= skb_dequeue(&hdev
->driver_init
))) {
204 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
205 skb
->dev
= (void *) hdev
;
207 skb_queue_tail(&hdev
->cmd_q
, skb
);
208 tasklet_schedule(&hdev
->cmd_task
);
210 skb_queue_purge(&hdev
->driver_init
);
212 /* Mandatory initialization */
215 if (!test_bit(HCI_QUIRK_NO_RESET
, &hdev
->quirks
))
216 hci_send_cmd(hdev
, HCI_OP_RESET
, 0, NULL
);
218 /* Read Local Supported Features */
219 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
221 /* Read Local Version */
222 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
224 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
225 hci_send_cmd(hdev
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
228 /* Host buffer size */
230 struct hci_cp_host_buffer_size cp
;
231 cp
.acl_mtu
= cpu_to_le16(HCI_MAX_ACL_SIZE
);
232 cp
.sco_mtu
= HCI_MAX_SCO_SIZE
;
233 cp
.acl_max_pkt
= cpu_to_le16(0xffff);
234 cp
.sco_max_pkt
= cpu_to_le16(0xffff);
235 hci_send_cmd(hdev
, HCI_OP_HOST_BUFFER_SIZE
, sizeof(cp
), &cp
);
239 /* Read BD Address */
240 hci_send_cmd(hdev
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
242 /* Read Class of Device */
243 hci_send_cmd(hdev
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
245 /* Read Local Name */
246 hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
248 /* Read Voice Setting */
249 hci_send_cmd(hdev
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
251 /* Optional initialization */
253 /* Clear Event Filters */
254 flt_type
= HCI_FLT_CLEAR_ALL
;
255 hci_send_cmd(hdev
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
257 /* Page timeout ~20 secs */
258 param
= cpu_to_le16(0x8000);
259 hci_send_cmd(hdev
, HCI_OP_WRITE_PG_TIMEOUT
, 2, ¶m
);
261 /* Connection accept timeout ~20 secs */
262 param
= cpu_to_le16(0x7d00);
263 hci_send_cmd(hdev
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
265 hdev
->req_last_cmd
= HCI_OP_WRITE_CA_TIMEOUT
;
268 static void hci_scan_req(struct hci_dev
*hdev
, unsigned long opt
)
272 BT_DBG("%s %x", hdev
->name
, scan
);
274 /* Inquiry and Page scans */
275 hci_send_cmd(hdev
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
278 static void hci_auth_req(struct hci_dev
*hdev
, unsigned long opt
)
282 BT_DBG("%s %x", hdev
->name
, auth
);
285 hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
288 static void hci_encrypt_req(struct hci_dev
*hdev
, unsigned long opt
)
292 BT_DBG("%s %x", hdev
->name
, encrypt
);
295 hci_send_cmd(hdev
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
298 static void hci_linkpol_req(struct hci_dev
*hdev
, unsigned long opt
)
300 __le16 policy
= cpu_to_le16(opt
);
302 BT_DBG("%s %x", hdev
->name
, policy
);
304 /* Default link policy */
305 hci_send_cmd(hdev
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
308 /* Get HCI device by index.
309 * Device is held on return. */
310 struct hci_dev
*hci_dev_get(int index
)
312 struct hci_dev
*hdev
= NULL
;
320 read_lock(&hci_dev_list_lock
);
321 list_for_each(p
, &hci_dev_list
) {
322 struct hci_dev
*d
= list_entry(p
, struct hci_dev
, list
);
323 if (d
->id
== index
) {
324 hdev
= hci_dev_hold(d
);
328 read_unlock(&hci_dev_list_lock
);
332 /* ---- Inquiry support ---- */
333 static void inquiry_cache_flush(struct hci_dev
*hdev
)
335 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
336 struct inquiry_entry
*next
= cache
->list
, *e
;
338 BT_DBG("cache %p", cache
);
347 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
349 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
350 struct inquiry_entry
*e
;
352 BT_DBG("cache %p, %s", cache
, batostr(bdaddr
));
354 for (e
= cache
->list
; e
; e
= e
->next
)
355 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
360 void hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
)
362 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
363 struct inquiry_entry
*ie
;
365 BT_DBG("cache %p, %s", cache
, batostr(&data
->bdaddr
));
367 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
369 /* Entry not in the cache. Add new one. */
370 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
374 ie
->next
= cache
->list
;
378 memcpy(&ie
->data
, data
, sizeof(*data
));
379 ie
->timestamp
= jiffies
;
380 cache
->timestamp
= jiffies
;
383 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
385 struct inquiry_cache
*cache
= &hdev
->inq_cache
;
386 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
387 struct inquiry_entry
*e
;
390 for (e
= cache
->list
; e
&& copied
< num
; e
= e
->next
, copied
++) {
391 struct inquiry_data
*data
= &e
->data
;
392 bacpy(&info
->bdaddr
, &data
->bdaddr
);
393 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
394 info
->pscan_period_mode
= data
->pscan_period_mode
;
395 info
->pscan_mode
= data
->pscan_mode
;
396 memcpy(info
->dev_class
, data
->dev_class
, 3);
397 info
->clock_offset
= data
->clock_offset
;
401 BT_DBG("cache %p, copied %d", cache
, copied
);
405 static void hci_inq_req(struct hci_dev
*hdev
, unsigned long opt
)
407 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
408 struct hci_cp_inquiry cp
;
410 BT_DBG("%s", hdev
->name
);
412 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
416 memcpy(&cp
.lap
, &ir
->lap
, 3);
417 cp
.length
= ir
->length
;
418 cp
.num_rsp
= ir
->num_rsp
;
419 hci_send_cmd(hdev
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
422 int hci_inquiry(void __user
*arg
)
424 __u8 __user
*ptr
= arg
;
425 struct hci_inquiry_req ir
;
426 struct hci_dev
*hdev
;
427 int err
= 0, do_inquiry
= 0, max_rsp
;
431 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
434 if (!(hdev
= hci_dev_get(ir
.dev_id
)))
437 hci_dev_lock_bh(hdev
);
438 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
439 inquiry_cache_empty(hdev
) ||
440 ir
.flags
& IREQ_CACHE_FLUSH
) {
441 inquiry_cache_flush(hdev
);
444 hci_dev_unlock_bh(hdev
);
446 timeo
= ir
.length
* msecs_to_jiffies(2000);
449 err
= hci_request(hdev
, hci_inq_req
, (unsigned long)&ir
, timeo
);
454 /* for unlimited number of responses we will use buffer with 255 entries */
455 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
457 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
458 * copy it to the user space.
460 buf
= kmalloc(sizeof(struct inquiry_info
) *max_rsp
, GFP_KERNEL
);
466 hci_dev_lock_bh(hdev
);
467 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
468 hci_dev_unlock_bh(hdev
);
470 BT_DBG("num_rsp %d", ir
.num_rsp
);
472 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
474 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
487 /* ---- HCI ioctl helpers ---- */
489 int hci_dev_open(__u16 dev
)
491 struct hci_dev
*hdev
;
494 if (!(hdev
= hci_dev_get(dev
)))
497 BT_DBG("%s %p", hdev
->name
, hdev
);
501 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
)) {
506 if (test_bit(HCI_UP
, &hdev
->flags
)) {
511 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
512 set_bit(HCI_RAW
, &hdev
->flags
);
514 /* Treat all non BR/EDR controllers as raw devices for now */
515 if (hdev
->dev_type
!= HCI_BREDR
)
516 set_bit(HCI_RAW
, &hdev
->flags
);
518 if (hdev
->open(hdev
)) {
523 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
524 atomic_set(&hdev
->cmd_cnt
, 1);
525 set_bit(HCI_INIT
, &hdev
->flags
);
527 //__hci_request(hdev, hci_reset_req, 0, HZ);
528 ret
= __hci_request(hdev
, hci_init_req
, 0,
529 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
531 clear_bit(HCI_INIT
, &hdev
->flags
);
536 set_bit(HCI_UP
, &hdev
->flags
);
537 hci_notify(hdev
, HCI_DEV_UP
);
538 if (!test_bit(HCI_SETUP
, &hdev
->flags
))
539 mgmt_powered(hdev
->id
, 1);
541 /* Init failed, cleanup */
542 tasklet_kill(&hdev
->rx_task
);
543 tasklet_kill(&hdev
->tx_task
);
544 tasklet_kill(&hdev
->cmd_task
);
546 skb_queue_purge(&hdev
->cmd_q
);
547 skb_queue_purge(&hdev
->rx_q
);
552 if (hdev
->sent_cmd
) {
553 kfree_skb(hdev
->sent_cmd
);
554 hdev
->sent_cmd
= NULL
;
562 hci_req_unlock(hdev
);
567 static int hci_dev_do_close(struct hci_dev
*hdev
)
569 BT_DBG("%s %p", hdev
->name
, hdev
);
571 hci_req_cancel(hdev
, ENODEV
);
574 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
575 hci_req_unlock(hdev
);
579 /* Kill RX and TX tasks */
580 tasklet_kill(&hdev
->rx_task
);
581 tasklet_kill(&hdev
->tx_task
);
583 hci_dev_lock_bh(hdev
);
584 inquiry_cache_flush(hdev
);
585 hci_conn_hash_flush(hdev
);
586 hci_dev_unlock_bh(hdev
);
588 hci_notify(hdev
, HCI_DEV_DOWN
);
594 skb_queue_purge(&hdev
->cmd_q
);
595 atomic_set(&hdev
->cmd_cnt
, 1);
596 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
597 set_bit(HCI_INIT
, &hdev
->flags
);
598 __hci_request(hdev
, hci_reset_req
, 0,
599 msecs_to_jiffies(250));
600 clear_bit(HCI_INIT
, &hdev
->flags
);
604 tasklet_kill(&hdev
->cmd_task
);
607 skb_queue_purge(&hdev
->rx_q
);
608 skb_queue_purge(&hdev
->cmd_q
);
609 skb_queue_purge(&hdev
->raw_q
);
611 /* Drop last sent command */
612 if (hdev
->sent_cmd
) {
613 kfree_skb(hdev
->sent_cmd
);
614 hdev
->sent_cmd
= NULL
;
617 /* After this point our queues are empty
618 * and no tasks are scheduled. */
621 mgmt_powered(hdev
->id
, 0);
626 hci_req_unlock(hdev
);
632 int hci_dev_close(__u16 dev
)
634 struct hci_dev
*hdev
;
637 hdev
= hci_dev_get(dev
);
640 err
= hci_dev_do_close(hdev
);
645 int hci_dev_reset(__u16 dev
)
647 struct hci_dev
*hdev
;
650 hdev
= hci_dev_get(dev
);
655 tasklet_disable(&hdev
->tx_task
);
657 if (!test_bit(HCI_UP
, &hdev
->flags
))
661 skb_queue_purge(&hdev
->rx_q
);
662 skb_queue_purge(&hdev
->cmd_q
);
664 hci_dev_lock_bh(hdev
);
665 inquiry_cache_flush(hdev
);
666 hci_conn_hash_flush(hdev
);
667 hci_dev_unlock_bh(hdev
);
672 atomic_set(&hdev
->cmd_cnt
, 1);
673 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0;
675 if (!test_bit(HCI_RAW
, &hdev
->flags
))
676 ret
= __hci_request(hdev
, hci_reset_req
, 0,
677 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
680 tasklet_enable(&hdev
->tx_task
);
681 hci_req_unlock(hdev
);
686 int hci_dev_reset_stat(__u16 dev
)
688 struct hci_dev
*hdev
;
691 hdev
= hci_dev_get(dev
);
695 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
702 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
704 struct hci_dev
*hdev
;
705 struct hci_dev_req dr
;
708 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
711 hdev
= hci_dev_get(dr
.dev_id
);
717 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
718 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
722 if (!lmp_encrypt_capable(hdev
)) {
727 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
728 /* Auth must be enabled first */
729 err
= hci_request(hdev
, hci_auth_req
, dr
.dev_opt
,
730 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
735 err
= hci_request(hdev
, hci_encrypt_req
, dr
.dev_opt
,
736 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
740 err
= hci_request(hdev
, hci_scan_req
, dr
.dev_opt
,
741 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
745 err
= hci_request(hdev
, hci_linkpol_req
, dr
.dev_opt
,
746 msecs_to_jiffies(HCI_INIT_TIMEOUT
));
750 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
751 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
755 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
759 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
760 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
764 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
765 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
777 int hci_get_dev_list(void __user
*arg
)
779 struct hci_dev_list_req
*dl
;
780 struct hci_dev_req
*dr
;
782 int n
= 0, size
, err
;
785 if (get_user(dev_num
, (__u16 __user
*) arg
))
788 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
791 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
793 dl
= kzalloc(size
, GFP_KERNEL
);
799 read_lock_bh(&hci_dev_list_lock
);
800 list_for_each(p
, &hci_dev_list
) {
801 struct hci_dev
*hdev
;
803 hdev
= list_entry(p
, struct hci_dev
, list
);
805 hci_del_off_timer(hdev
);
807 if (!test_bit(HCI_MGMT
, &hdev
->flags
))
808 set_bit(HCI_PAIRABLE
, &hdev
->flags
);
810 (dr
+ n
)->dev_id
= hdev
->id
;
811 (dr
+ n
)->dev_opt
= hdev
->flags
;
816 read_unlock_bh(&hci_dev_list_lock
);
819 size
= sizeof(*dl
) + n
* sizeof(*dr
);
821 err
= copy_to_user(arg
, dl
, size
);
824 return err
? -EFAULT
: 0;
827 int hci_get_dev_info(void __user
*arg
)
829 struct hci_dev
*hdev
;
830 struct hci_dev_info di
;
833 if (copy_from_user(&di
, arg
, sizeof(di
)))
836 hdev
= hci_dev_get(di
.dev_id
);
840 hci_del_off_timer(hdev
);
842 if (!test_bit(HCI_MGMT
, &hdev
->flags
))
843 set_bit(HCI_PAIRABLE
, &hdev
->flags
);
845 strcpy(di
.name
, hdev
->name
);
846 di
.bdaddr
= hdev
->bdaddr
;
847 di
.type
= (hdev
->bus
& 0x0f) | (hdev
->dev_type
<< 4);
848 di
.flags
= hdev
->flags
;
849 di
.pkt_type
= hdev
->pkt_type
;
850 di
.acl_mtu
= hdev
->acl_mtu
;
851 di
.acl_pkts
= hdev
->acl_pkts
;
852 di
.sco_mtu
= hdev
->sco_mtu
;
853 di
.sco_pkts
= hdev
->sco_pkts
;
854 di
.link_policy
= hdev
->link_policy
;
855 di
.link_mode
= hdev
->link_mode
;
857 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
858 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
860 if (copy_to_user(arg
, &di
, sizeof(di
)))
868 /* ---- Interface to HCI drivers ---- */
870 static int hci_rfkill_set_block(void *data
, bool blocked
)
872 struct hci_dev
*hdev
= data
;
874 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
879 hci_dev_do_close(hdev
);
884 static const struct rfkill_ops hci_rfkill_ops
= {
885 .set_block
= hci_rfkill_set_block
,
888 /* Alloc HCI device */
889 struct hci_dev
*hci_alloc_dev(void)
891 struct hci_dev
*hdev
;
893 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
897 skb_queue_head_init(&hdev
->driver_init
);
901 EXPORT_SYMBOL(hci_alloc_dev
);
903 /* Free HCI device */
904 void hci_free_dev(struct hci_dev
*hdev
)
906 skb_queue_purge(&hdev
->driver_init
);
908 /* will free via device release */
909 put_device(&hdev
->dev
);
911 EXPORT_SYMBOL(hci_free_dev
);
913 static void hci_power_on(struct work_struct
*work
)
915 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
917 BT_DBG("%s", hdev
->name
);
919 if (hci_dev_open(hdev
->id
) < 0)
922 if (test_bit(HCI_AUTO_OFF
, &hdev
->flags
))
923 mod_timer(&hdev
->off_timer
,
924 jiffies
+ msecs_to_jiffies(AUTO_OFF_TIMEOUT
));
926 if (test_and_clear_bit(HCI_SETUP
, &hdev
->flags
))
927 mgmt_index_added(hdev
->id
);
930 static void hci_power_off(struct work_struct
*work
)
932 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_off
);
934 BT_DBG("%s", hdev
->name
);
936 hci_dev_close(hdev
->id
);
939 static void hci_auto_off(unsigned long data
)
941 struct hci_dev
*hdev
= (struct hci_dev
*) data
;
943 BT_DBG("%s", hdev
->name
);
945 clear_bit(HCI_AUTO_OFF
, &hdev
->flags
);
947 queue_work(hdev
->workqueue
, &hdev
->power_off
);
950 void hci_del_off_timer(struct hci_dev
*hdev
)
952 BT_DBG("%s", hdev
->name
);
954 clear_bit(HCI_AUTO_OFF
, &hdev
->flags
);
955 del_timer(&hdev
->off_timer
);
958 /* Register HCI device */
959 int hci_register_dev(struct hci_dev
*hdev
)
961 struct list_head
*head
= &hci_dev_list
, *p
;
964 BT_DBG("%p name %s bus %d owner %p", hdev
, hdev
->name
,
965 hdev
->bus
, hdev
->owner
);
967 if (!hdev
->open
|| !hdev
->close
|| !hdev
->destruct
)
970 write_lock_bh(&hci_dev_list_lock
);
972 /* Find first available device id */
973 list_for_each(p
, &hci_dev_list
) {
974 if (list_entry(p
, struct hci_dev
, list
)->id
!= id
)
979 sprintf(hdev
->name
, "hci%d", id
);
981 list_add(&hdev
->list
, head
);
983 atomic_set(&hdev
->refcnt
, 1);
984 spin_lock_init(&hdev
->lock
);
987 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
988 hdev
->esco_type
= (ESCO_HV1
);
989 hdev
->link_mode
= (HCI_LM_ACCEPT
);
991 hdev
->idle_timeout
= 0;
992 hdev
->sniff_max_interval
= 800;
993 hdev
->sniff_min_interval
= 80;
995 tasklet_init(&hdev
->cmd_task
, hci_cmd_task
, (unsigned long) hdev
);
996 tasklet_init(&hdev
->rx_task
, hci_rx_task
, (unsigned long) hdev
);
997 tasklet_init(&hdev
->tx_task
, hci_tx_task
, (unsigned long) hdev
);
999 skb_queue_head_init(&hdev
->rx_q
);
1000 skb_queue_head_init(&hdev
->cmd_q
);
1001 skb_queue_head_init(&hdev
->raw_q
);
1003 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1004 hdev
->reassembly
[i
] = NULL
;
1006 init_waitqueue_head(&hdev
->req_wait_q
);
1007 mutex_init(&hdev
->req_lock
);
1009 inquiry_cache_init(hdev
);
1011 hci_conn_hash_init(hdev
);
1013 INIT_LIST_HEAD(&hdev
->blacklist
);
1015 INIT_WORK(&hdev
->power_on
, hci_power_on
);
1016 INIT_WORK(&hdev
->power_off
, hci_power_off
);
1017 setup_timer(&hdev
->off_timer
, hci_auto_off
, (unsigned long) hdev
);
1019 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1021 atomic_set(&hdev
->promisc
, 0);
1023 write_unlock_bh(&hci_dev_list_lock
);
1025 hdev
->workqueue
= create_singlethread_workqueue(hdev
->name
);
1026 if (!hdev
->workqueue
)
1029 hci_register_sysfs(hdev
);
1031 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
1032 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
, hdev
);
1034 if (rfkill_register(hdev
->rfkill
) < 0) {
1035 rfkill_destroy(hdev
->rfkill
);
1036 hdev
->rfkill
= NULL
;
1040 set_bit(HCI_AUTO_OFF
, &hdev
->flags
);
1041 set_bit(HCI_SETUP
, &hdev
->flags
);
1042 queue_work(hdev
->workqueue
, &hdev
->power_on
);
1044 hci_notify(hdev
, HCI_DEV_REG
);
1049 write_lock_bh(&hci_dev_list_lock
);
1050 list_del(&hdev
->list
);
1051 write_unlock_bh(&hci_dev_list_lock
);
1055 EXPORT_SYMBOL(hci_register_dev
);
1057 /* Unregister HCI device */
1058 int hci_unregister_dev(struct hci_dev
*hdev
)
1062 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
1064 write_lock_bh(&hci_dev_list_lock
);
1065 list_del(&hdev
->list
);
1066 write_unlock_bh(&hci_dev_list_lock
);
1068 hci_dev_do_close(hdev
);
1070 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
1071 kfree_skb(hdev
->reassembly
[i
]);
1073 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
1074 !test_bit(HCI_SETUP
, &hdev
->flags
))
1075 mgmt_index_removed(hdev
->id
);
1077 hci_notify(hdev
, HCI_DEV_UNREG
);
1080 rfkill_unregister(hdev
->rfkill
);
1081 rfkill_destroy(hdev
->rfkill
);
1084 hci_unregister_sysfs(hdev
);
1086 destroy_workqueue(hdev
->workqueue
);
1088 hci_dev_lock_bh(hdev
);
1089 hci_blacklist_clear(hdev
);
1090 hci_dev_unlock_bh(hdev
);
1092 __hci_dev_put(hdev
);
1096 EXPORT_SYMBOL(hci_unregister_dev
);
1098 /* Suspend HCI device */
1099 int hci_suspend_dev(struct hci_dev
*hdev
)
1101 hci_notify(hdev
, HCI_DEV_SUSPEND
);
1104 EXPORT_SYMBOL(hci_suspend_dev
);
1106 /* Resume HCI device */
1107 int hci_resume_dev(struct hci_dev
*hdev
)
1109 hci_notify(hdev
, HCI_DEV_RESUME
);
1112 EXPORT_SYMBOL(hci_resume_dev
);
1114 /* Receive frame from HCI drivers */
1115 int hci_recv_frame(struct sk_buff
*skb
)
1117 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1118 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
1119 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
1125 bt_cb(skb
)->incoming
= 1;
1128 __net_timestamp(skb
);
1130 /* Queue frame for rx task */
1131 skb_queue_tail(&hdev
->rx_q
, skb
);
1132 tasklet_schedule(&hdev
->rx_task
);
1136 EXPORT_SYMBOL(hci_recv_frame
);
1138 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
1139 int count
, __u8 index
, gfp_t gfp_mask
)
1144 struct sk_buff
*skb
;
1145 struct bt_skb_cb
*scb
;
1147 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
1148 index
>= NUM_REASSEMBLY
)
1151 skb
= hdev
->reassembly
[index
];
1155 case HCI_ACLDATA_PKT
:
1156 len
= HCI_MAX_FRAME_SIZE
;
1157 hlen
= HCI_ACL_HDR_SIZE
;
1160 len
= HCI_MAX_EVENT_SIZE
;
1161 hlen
= HCI_EVENT_HDR_SIZE
;
1163 case HCI_SCODATA_PKT
:
1164 len
= HCI_MAX_SCO_SIZE
;
1165 hlen
= HCI_SCO_HDR_SIZE
;
1169 skb
= bt_skb_alloc(len
, gfp_mask
);
1173 scb
= (void *) skb
->cb
;
1175 scb
->pkt_type
= type
;
1177 skb
->dev
= (void *) hdev
;
1178 hdev
->reassembly
[index
] = skb
;
1182 scb
= (void *) skb
->cb
;
1183 len
= min(scb
->expect
, (__u16
)count
);
1185 memcpy(skb_put(skb
, len
), data
, len
);
1194 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
1195 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
1196 scb
->expect
= h
->plen
;
1198 if (skb_tailroom(skb
) < scb
->expect
) {
1200 hdev
->reassembly
[index
] = NULL
;
1206 case HCI_ACLDATA_PKT
:
1207 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
1208 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
1209 scb
->expect
= __le16_to_cpu(h
->dlen
);
1211 if (skb_tailroom(skb
) < scb
->expect
) {
1213 hdev
->reassembly
[index
] = NULL
;
1219 case HCI_SCODATA_PKT
:
1220 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
1221 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
1222 scb
->expect
= h
->dlen
;
1224 if (skb_tailroom(skb
) < scb
->expect
) {
1226 hdev
->reassembly
[index
] = NULL
;
1233 if (scb
->expect
== 0) {
1234 /* Complete frame */
1236 bt_cb(skb
)->pkt_type
= type
;
1237 hci_recv_frame(skb
);
1239 hdev
->reassembly
[index
] = NULL
;
1247 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
1251 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
1255 rem
= hci_reassembly(hdev
, type
, data
, count
,
1256 type
- 1, GFP_ATOMIC
);
1260 data
+= (count
- rem
);
1266 EXPORT_SYMBOL(hci_recv_fragment
);
1268 #define STREAM_REASSEMBLY 0
1270 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
1276 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
1279 struct { char type
; } *pkt
;
1281 /* Start of the frame */
1288 type
= bt_cb(skb
)->pkt_type
;
1290 rem
= hci_reassembly(hdev
, type
, data
,
1291 count
, STREAM_REASSEMBLY
, GFP_ATOMIC
);
1295 data
+= (count
- rem
);
1301 EXPORT_SYMBOL(hci_recv_stream_fragment
);
1303 /* ---- Interface to upper protocols ---- */
1305 /* Register/Unregister protocols.
1306 * hci_task_lock is used to ensure that no tasks are running. */
1307 int hci_register_proto(struct hci_proto
*hp
)
1311 BT_DBG("%p name %s id %d", hp
, hp
->name
, hp
->id
);
1313 if (hp
->id
>= HCI_MAX_PROTO
)
1316 write_lock_bh(&hci_task_lock
);
1318 if (!hci_proto
[hp
->id
])
1319 hci_proto
[hp
->id
] = hp
;
1323 write_unlock_bh(&hci_task_lock
);
1327 EXPORT_SYMBOL(hci_register_proto
);
1329 int hci_unregister_proto(struct hci_proto
*hp
)
1333 BT_DBG("%p name %s id %d", hp
, hp
->name
, hp
->id
);
1335 if (hp
->id
>= HCI_MAX_PROTO
)
1338 write_lock_bh(&hci_task_lock
);
1340 if (hci_proto
[hp
->id
])
1341 hci_proto
[hp
->id
] = NULL
;
1345 write_unlock_bh(&hci_task_lock
);
1349 EXPORT_SYMBOL(hci_unregister_proto
);
1351 int hci_register_cb(struct hci_cb
*cb
)
1353 BT_DBG("%p name %s", cb
, cb
->name
);
1355 write_lock_bh(&hci_cb_list_lock
);
1356 list_add(&cb
->list
, &hci_cb_list
);
1357 write_unlock_bh(&hci_cb_list_lock
);
1361 EXPORT_SYMBOL(hci_register_cb
);
1363 int hci_unregister_cb(struct hci_cb
*cb
)
1365 BT_DBG("%p name %s", cb
, cb
->name
);
1367 write_lock_bh(&hci_cb_list_lock
);
1368 list_del(&cb
->list
);
1369 write_unlock_bh(&hci_cb_list_lock
);
1373 EXPORT_SYMBOL(hci_unregister_cb
);
1375 static int hci_send_frame(struct sk_buff
*skb
)
1377 struct hci_dev
*hdev
= (struct hci_dev
*) skb
->dev
;
1384 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
1386 if (atomic_read(&hdev
->promisc
)) {
1388 __net_timestamp(skb
);
1390 hci_send_to_sock(hdev
, skb
, NULL
);
1393 /* Get rid of skb owner, prior to sending to the driver. */
1396 return hdev
->send(skb
);
1399 /* Send HCI command */
1400 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
, void *param
)
1402 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
1403 struct hci_command_hdr
*hdr
;
1404 struct sk_buff
*skb
;
1406 BT_DBG("%s opcode 0x%x plen %d", hdev
->name
, opcode
, plen
);
1408 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
1410 BT_ERR("%s no memory for command", hdev
->name
);
1414 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
1415 hdr
->opcode
= cpu_to_le16(opcode
);
1419 memcpy(skb_put(skb
, plen
), param
, plen
);
1421 BT_DBG("skb len %d", skb
->len
);
1423 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
1424 skb
->dev
= (void *) hdev
;
1426 skb_queue_tail(&hdev
->cmd_q
, skb
);
1427 tasklet_schedule(&hdev
->cmd_task
);
1432 /* Get data from the previously sent command */
1433 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
1435 struct hci_command_hdr
*hdr
;
1437 if (!hdev
->sent_cmd
)
1440 hdr
= (void *) hdev
->sent_cmd
->data
;
1442 if (hdr
->opcode
!= cpu_to_le16(opcode
))
1445 BT_DBG("%s opcode 0x%x", hdev
->name
, opcode
);
1447 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
1451 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
1453 struct hci_acl_hdr
*hdr
;
1456 skb_push(skb
, HCI_ACL_HDR_SIZE
);
1457 skb_reset_transport_header(skb
);
1458 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
1459 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
1460 hdr
->dlen
= cpu_to_le16(len
);
1463 void hci_send_acl(struct hci_conn
*conn
, struct sk_buff
*skb
, __u16 flags
)
1465 struct hci_dev
*hdev
= conn
->hdev
;
1466 struct sk_buff
*list
;
1468 BT_DBG("%s conn %p flags 0x%x", hdev
->name
, conn
, flags
);
1470 skb
->dev
= (void *) hdev
;
1471 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
1472 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
1474 list
= skb_shinfo(skb
)->frag_list
;
1476 /* Non fragmented */
1477 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
1479 skb_queue_tail(&conn
->data_q
, skb
);
1482 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
1484 skb_shinfo(skb
)->frag_list
= NULL
;
1486 /* Queue all fragments atomically */
1487 spin_lock_bh(&conn
->data_q
.lock
);
1489 __skb_queue_tail(&conn
->data_q
, skb
);
1491 flags
&= ~ACL_START
;
1494 skb
= list
; list
= list
->next
;
1496 skb
->dev
= (void *) hdev
;
1497 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
1498 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
1500 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
1502 __skb_queue_tail(&conn
->data_q
, skb
);
1505 spin_unlock_bh(&conn
->data_q
.lock
);
1508 tasklet_schedule(&hdev
->tx_task
);
1510 EXPORT_SYMBOL(hci_send_acl
);
1513 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
1515 struct hci_dev
*hdev
= conn
->hdev
;
1516 struct hci_sco_hdr hdr
;
1518 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
1520 hdr
.handle
= cpu_to_le16(conn
->handle
);
1521 hdr
.dlen
= skb
->len
;
1523 skb_push(skb
, HCI_SCO_HDR_SIZE
);
1524 skb_reset_transport_header(skb
);
1525 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
1527 skb
->dev
= (void *) hdev
;
1528 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
1530 skb_queue_tail(&conn
->data_q
, skb
);
1531 tasklet_schedule(&hdev
->tx_task
);
1533 EXPORT_SYMBOL(hci_send_sco
);
1535 /* ---- HCI TX task (outgoing data) ---- */
/* HCI Connection scheduler */

/* Select the connection of link @type that has the fewest packets in
 * flight (lowest conn->sent) and at least one frame queued, and compute
 * in *@quote how many frames it may send this round: its even share of
 * the controller's free buffer credits, but at least 1.  Returns NULL
 * (with *quote = 0) when no eligible connection exists. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;	/* num: eligible conns; min: lowest sent count */
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		/* Skip other link types and connections with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only established (or still configuring) links may transmit */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Divide the free controller buffers evenly between the
		 * eligible connections; grant at least one credit so the
		 * winner always makes progress. */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);

	return conn;
}
/* ACL transmit timeout: called from hci_sched_acl() when the controller
 * has not returned any ACL buffer credits for too long.  Tears down
 * every ACL link that still has unacknowledged packets outstanding. */
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		/* c->sent != 0 means the controller never acked its packets */
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: HCI reason "Remote User Terminated Connection" */
			hci_acl_disconn(c, 0x13);
		}
	}
}
/* Drain queued ACL data to the driver, scheduling fairly across
 * connections via hci_low_sent() and bounded by the controller's free
 * ACL buffer count (hdev->acl_cnt). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Wake the link out of sniff mode before sending */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;	/* one controller buffer consumed */
			conn->sent++;		/* outstanding packets on this link */
		}
	}
}
/* Drain queued SCO data to the driver, bounded by the controller's
 * free SCO buffer count (hdev->sco_cnt). */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			/* Wrap the counter instead of letting it overflow */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
/* Drain queued eSCO data to the driver.  Note: eSCO shares the SCO
 * buffer credit pool (hdev->sco_cnt) with plain SCO links. */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			/* Wrap the counter instead of letting it overflow */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
/* TX tasklet body: schedules ACL, SCO and eSCO queues to the driver,
 * then flushes any raw (unknown type) packets queued on hdev->raw_q.
 * Runs under the hci_task_lock read lock so device teardown (which
 * takes the write lock) cannot race with transmission. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1690 /* ----- HCI RX task (incoming data proccessing) ----- */
/* ACL data packet */

/* Deliver one inbound ACL data packet to the upper protocol (L2CAP).
 * Strips the HCI ACL header, resolves the connection handle, and hands
 * the skb to the registered protocol; if nothing consumes it, the skb
 * is freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit header field packs the connection handle and the
	 * packet boundary/broadcast flags; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* L2CAP takes ownership of the skb */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	/* No consumer: drop the packet */
	kfree_skb(skb);
}
/* SCO data packet */

/* Deliver one inbound SCO data packet to the upper protocol.  Strips
 * the HCI SCO header, resolves the connection handle, and hands the
 * skb to the registered SCO protocol; if nothing consumes it, the skb
 * is freed here. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* SCO layer takes ownership of the skb */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	/* No consumer: drop the packet */
	kfree_skb(skb);
}
/* RX tasklet body: drains hdev->rx_q and dispatches each frame by
 * packet type (event / ACL / SCO).  Frames are mirrored to promiscuous
 * sockets first; raw-mode devices and data packets arriving during
 * device init are dropped.  Runs under the hci_task_lock read lock. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: the stack does not process frames itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
1823 static void hci_cmd_task(unsigned long arg
)
1825 struct hci_dev
*hdev
= (struct hci_dev
*) arg
;
1826 struct sk_buff
*skb
;
1828 BT_DBG("%s cmd %d", hdev
->name
, atomic_read(&hdev
->cmd_cnt
));
1830 if (!atomic_read(&hdev
->cmd_cnt
) && time_after(jiffies
, hdev
->cmd_last_tx
+ HZ
)) {
1831 BT_ERR("%s command tx timeout", hdev
->name
);
1832 atomic_set(&hdev
->cmd_cnt
, 1);
1835 /* Send queued commands */
1836 if (atomic_read(&hdev
->cmd_cnt
) && (skb
= skb_dequeue(&hdev
->cmd_q
))) {
1837 kfree_skb(hdev
->sent_cmd
);
1839 hdev
->sent_cmd
= skb_clone(skb
, GFP_ATOMIC
);
1840 if (hdev
->sent_cmd
) {
1841 atomic_dec(&hdev
->cmd_cnt
);
1842 hci_send_frame(skb
);
1843 hdev
->cmd_last_tx
= jiffies
;
1845 skb_queue_head(&hdev
->cmd_q
, skb
);
1846 tasklet_schedule(&hdev
->cmd_task
);