2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
/* Deferred work handlers; bodies are defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* Global list of registered HCI devices, protected by hci_dev_list_lock. */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering: allocates the hciN index for each controller. */
static DEFINE_IDA(hci_index_ida);
52 /* ---- HCI notifications ---- */
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer so monitoring
 * sockets can observe device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59 /* ---- HCI debugfs entries ---- */
/* ---- HCI debugfs entries ---- */

/* Dump all supported LMP feature pages (and LE features when the
 * controller is LE capable) in hex, one page per line.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* List every blacklisted remote address together with its address type. */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
124 static int uuids_show(struct seq_file
*f
, void *p
)
126 struct hci_dev
*hdev
= f
->private;
127 struct bt_uuid
*uuid
;
130 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
132 u16 data1
, data2
, data3
, data4
;
134 data5
= get_unaligned_le32(uuid
);
135 data4
= get_unaligned_le16(uuid
+ 4);
136 data3
= get_unaligned_le16(uuid
+ 6);
137 data2
= get_unaligned_le16(uuid
+ 8);
138 data1
= get_unaligned_le16(uuid
+ 10);
139 data0
= get_unaligned_le32(uuid
+ 12);
141 seq_printf(f
, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
142 data0
, data1
, data2
, data3
, data4
, data5
);
144 hci_dev_unlock(hdev
);
149 static int uuids_open(struct inode
*inode
, struct file
*file
)
151 return single_open(file
, uuids_show
, inode
->i_private
);
154 static const struct file_operations uuids_fops
= {
158 .release
= single_release
,
/* Dump the inquiry (discovery) cache: one line per remote device with
 * page-scan parameters, class of device, clock offset, RSSI, SSP mode
 * and the timestamp of the last sighting.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* List stored BR/EDR link keys: address, key type, key value and PIN
 * length.  The file is created 0400 since key material is sensitive.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Report 'Y' or 'N' depending on whether the HCI_DEBUG_KEYS flag is
 * set; classic boolean debugfs read helper.
 */
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	/* Only two bytes ('Y'/'N' plus newline) are exposed to userspace */
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};
/* Show the 3-byte class of device, most significant byte first. */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Read-only attribute exposing the controller's voice setting value. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

/* No setter: voice_setting is read-only from debugfs. */
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
/* Set the delay before incoming connections are automatically accepted. */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
/* Toggle Secure Simple Pairing debug mode on the controller.
 *
 * Unlike the other attributes this one talks to the hardware: it sends
 * HCI_OP_WRITE_SSP_DEBUG_MODE synchronously and only updates the cached
 * hdev->ssp_debug_mode on success.  Requires the device to be up.
 *
 * NOTE(review): the error-handling lines between the command send and
 * the cache update were dropped by the extraction; reconstructed here —
 * verify against the upstream source.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	/* Only a boolean value is meaningful for debug mode */
	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First status byte of the command-complete reply */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
/* Set the connection idle timeout in milliseconds.
 * 0 disables the timeout; otherwise the value must lie in
 * [500 ms, 3600000 ms] (one hour).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
/* Set the minimum sniff interval (baseband slots).  Must be non-zero,
 * even, and not exceed the configured maximum interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
/* Set the maximum sniff interval (baseband slots).  Must be non-zero,
 * even, and not below the configured minimum interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
/* Show the LE static random address configured for this controller. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Select the own address type used for LE: 0 = public, 1 = random. */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
490 static int long_term_keys_show(struct seq_file
*f
, void *ptr
)
492 struct hci_dev
*hdev
= f
->private;
493 struct list_head
*p
, *n
;
496 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
497 struct smp_ltk
*ltk
= list_entry(p
, struct smp_ltk
, list
);
498 seq_printf(f
, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
499 <k
->bdaddr
, ltk
->bdaddr_type
, ltk
->authenticated
,
500 ltk
->type
, ltk
->enc_size
, __le16_to_cpu(ltk
->ediv
),
501 8, ltk
->rand
, 16, ltk
->val
);
503 hci_dev_unlock(hdev
);
508 static int long_term_keys_open(struct inode
*inode
, struct file
*file
)
510 return single_open(file
, long_term_keys_show
, inode
->i_private
);
513 static const struct file_operations long_term_keys_fops
= {
514 .open
= long_term_keys_open
,
517 .release
= single_release
,
/* Set the default minimum LE connection interval.  Valid range per the
 * Bluetooth spec is 0x0006-0x0c80, and it must not exceed the current
 * maximum interval.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
/* Set the default maximum LE connection interval.  Valid range per the
 * Bluetooth spec is 0x0006-0x0c80, and it must not be below the current
 * minimum interval.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
576 /* ---- HCI requests ---- */
/* ---- HCI requests ---- */

/* Completion callback for synchronous requests: record the result and
 * wake up the waiter sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Abort a pending synchronous request with the given error and wake up
 * the waiter.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Detach the last received event (hdev->recv_evt) and validate that it
 * matches the given opcode/event.  On success the caller owns the
 * returned skb (header already pulled); on any mismatch or malformed
 * event the skb is freed and ERR_PTR(-ENODATA) is returned.
 *
 * NOTE(review): the failure-path lines (goto targets, kfree_skb) were
 * dropped by the extraction and are reconstructed here — verify
 * against the upstream source.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Take ownership of the stashed event */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* A non-zero event means the caller waits for a specific event
	 * rather than a plain command-complete.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
/* Send a single HCI command and sleep until the matching completion
 * event arrives (or the timeout expires).  Returns the completion skb
 * on success, an ERR_PTR on failure.  Caller must hold hci_req_lock.
 *
 * NOTE(review): several error-handling lines were dropped by the
 * extraction and are reconstructed here — verify against upstream.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Sleep until hci_req_sync_complete() wakes us or we time out */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous HCI command waiting for a plain
 * command-complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Execute request and wait for completion.
 *
 * Builds a request via @func, runs it and sleeps until the completion
 * callback fires or @timeout expires.  Caller must hold hci_req_lock.
 *
 * NOTE(review): parts of the signature and error paths were dropped by
 * the extraction and are reconstructed here — verify against upstream.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller populate the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
/* Locked variant of __hci_req_sync(): fails with -ENETDOWN when the
 * device is not up and serializes requests via hci_req_lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
/* Queue an HCI_OP_RESET and mark the device as resetting. */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
/* Stage-1 init for BR/EDR controllers: packet-based flow control and
 * the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific information reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
/* Stage-1 init: optional reset, then dispatch to the BR/EDR or AMP
 * init sequence based on the controller type.
 *
 * NOTE(review): the switch-case bodies were dropped by the extraction
 * and are reconstructed here — verify against upstream.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
/* Stage-2 BR/EDR setup: query controller parameters, clear event
 * filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
/* Stage-2 LE setup: query LE buffer, features, TX power, white list
 * and supported states; implicitly enable LE on LE-only controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
/* Pick the best inquiry mode the controller supports: 0x02 (extended),
 * 0x01 (with RSSI) or 0x00 (standard).  Several controllers advertise
 * RSSI-capable inquiry but are known broken, hence the manufacturer /
 * revision special cases.
 *
 * NOTE(review): the literal return values were dropped by the
 * extraction and are reconstructed here — verify against upstream.
 */
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
/* Queue a Write Inquiry Mode command with the best supported mode. */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
/* Build and queue the HCI event mask based on the controller's
 * capabilities, plus the LE event mask for LE-capable controllers.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
/* Stage-2 init: BR/EDR or LE base setup, event mask, and the
 * capability-dependent commands (SSP/EIR, inquiry mode, TX power,
 * extended features, authentication).
 *
 * NOTE(review): several interior lines (else branches, local variable
 * declarations) were dropped by the extraction and are reconstructed
 * here — verify against upstream.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue the write command.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
/* Write the LE Host Supported setting when it differs from what the
 * controller currently reports.  Skipped for LE-only controllers,
 * which cannot disable LE.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only write when the host setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
/* Build and queue the second event mask page, enabling Connectionless
 * Slave Broadcast events for whichever CSB roles the controller
 * supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
/* Stage-3 init: stored-link-key cleanup, link policy, LE address type
 * selection and reads of the remaining feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by
		 * default use that one. If this is a LE only
		 * controller without one, default to the random
		 * address.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
		else
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
/* Stage-4 init: optional second event mask page and synchronization
 * train parameter read.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
/* Run the four-stage controller init sequence, then (during initial
 * setup only) create the per-device debugfs entries appropriate to the
 * controller's capabilities.
 *
 * NOTE(review): the error-check lines between stages were dropped by
 * the extraction and are reconstructed here — verify against upstream.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		/* Key material: owner-read only */
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		/* Key material: owner-read only */
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
	}

	return 0;
}
1341 static void hci_scan_req(struct hci_request
*req
, unsigned long opt
)
1345 BT_DBG("%s %x", req
->hdev
->name
, scan
);
1347 /* Inquiry and Page scans */
1348 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1351 static void hci_auth_req(struct hci_request
*req
, unsigned long opt
)
1355 BT_DBG("%s %x", req
->hdev
->name
, auth
);
1357 /* Authentication */
1358 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
1361 static void hci_encrypt_req(struct hci_request
*req
, unsigned long opt
)
1365 BT_DBG("%s %x", req
->hdev
->name
, encrypt
);
1368 hci_req_add(req
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
1371 static void hci_linkpol_req(struct hci_request
*req
, unsigned long opt
)
1373 __le16 policy
= cpu_to_le16(opt
);
1375 BT_DBG("%s %x", req
->hdev
->name
, policy
);
1377 /* Default link policy */
1378 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
1381 /* Get HCI device by index.
1382 * Device is held on return. */
1383 struct hci_dev
*hci_dev_get(int index
)
1385 struct hci_dev
*hdev
= NULL
, *d
;
1387 BT_DBG("%d", index
);
1392 read_lock(&hci_dev_list_lock
);
1393 list_for_each_entry(d
, &hci_dev_list
, list
) {
1394 if (d
->id
== index
) {
1395 hdev
= hci_dev_hold(d
);
1399 read_unlock(&hci_dev_list_lock
);
1403 /* ---- Inquiry support ---- */
1405 bool hci_discovery_active(struct hci_dev
*hdev
)
1407 struct discovery_state
*discov
= &hdev
->discovery
;
1409 switch (discov
->state
) {
1410 case DISCOVERY_FINDING
:
1411 case DISCOVERY_RESOLVING
:
1419 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
1421 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
1423 if (hdev
->discovery
.state
== state
)
1427 case DISCOVERY_STOPPED
:
1428 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
1429 mgmt_discovering(hdev
, 0);
1431 case DISCOVERY_STARTING
:
1433 case DISCOVERY_FINDING
:
1434 mgmt_discovering(hdev
, 1);
1436 case DISCOVERY_RESOLVING
:
1438 case DISCOVERY_STOPPING
:
1442 hdev
->discovery
.state
= state
;
1445 void hci_inquiry_cache_flush(struct hci_dev
*hdev
)
1447 struct discovery_state
*cache
= &hdev
->discovery
;
1448 struct inquiry_entry
*p
, *n
;
1450 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
1455 INIT_LIST_HEAD(&cache
->unknown
);
1456 INIT_LIST_HEAD(&cache
->resolve
);
1459 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
1462 struct discovery_state
*cache
= &hdev
->discovery
;
1463 struct inquiry_entry
*e
;
1465 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1467 list_for_each_entry(e
, &cache
->all
, all
) {
1468 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1475 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
1478 struct discovery_state
*cache
= &hdev
->discovery
;
1479 struct inquiry_entry
*e
;
1481 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1483 list_for_each_entry(e
, &cache
->unknown
, list
) {
1484 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1491 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
1495 struct discovery_state
*cache
= &hdev
->discovery
;
1496 struct inquiry_entry
*e
;
1498 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
1500 list_for_each_entry(e
, &cache
->resolve
, list
) {
1501 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
1503 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1510 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
1511 struct inquiry_entry
*ie
)
1513 struct discovery_state
*cache
= &hdev
->discovery
;
1514 struct list_head
*pos
= &cache
->resolve
;
1515 struct inquiry_entry
*p
;
1517 list_del(&ie
->list
);
1519 list_for_each_entry(p
, &cache
->resolve
, list
) {
1520 if (p
->name_state
!= NAME_PENDING
&&
1521 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
1526 list_add(&ie
->list
, pos
);
1529 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
1530 bool name_known
, bool *ssp
)
1532 struct discovery_state
*cache
= &hdev
->discovery
;
1533 struct inquiry_entry
*ie
;
1535 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
1537 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
);
1540 *ssp
= data
->ssp_mode
;
1542 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
1544 if (ie
->data
.ssp_mode
&& ssp
)
1547 if (ie
->name_state
== NAME_NEEDED
&&
1548 data
->rssi
!= ie
->data
.rssi
) {
1549 ie
->data
.rssi
= data
->rssi
;
1550 hci_inquiry_cache_update_resolve(hdev
, ie
);
1556 /* Entry not in the cache. Add new one. */
1557 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
1561 list_add(&ie
->all
, &cache
->all
);
1564 ie
->name_state
= NAME_KNOWN
;
1566 ie
->name_state
= NAME_NOT_KNOWN
;
1567 list_add(&ie
->list
, &cache
->unknown
);
1571 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
1572 ie
->name_state
!= NAME_PENDING
) {
1573 ie
->name_state
= NAME_KNOWN
;
1574 list_del(&ie
->list
);
1577 memcpy(&ie
->data
, data
, sizeof(*data
));
1578 ie
->timestamp
= jiffies
;
1579 cache
->timestamp
= jiffies
;
1581 if (ie
->name_state
== NAME_NOT_KNOWN
)
1587 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
1589 struct discovery_state
*cache
= &hdev
->discovery
;
1590 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
1591 struct inquiry_entry
*e
;
1594 list_for_each_entry(e
, &cache
->all
, all
) {
1595 struct inquiry_data
*data
= &e
->data
;
1600 bacpy(&info
->bdaddr
, &data
->bdaddr
);
1601 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
1602 info
->pscan_period_mode
= data
->pscan_period_mode
;
1603 info
->pscan_mode
= data
->pscan_mode
;
1604 memcpy(info
->dev_class
, data
->dev_class
, 3);
1605 info
->clock_offset
= data
->clock_offset
;
1611 BT_DBG("cache %p, copied %d", cache
, copied
);
1615 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
1617 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
1618 struct hci_dev
*hdev
= req
->hdev
;
1619 struct hci_cp_inquiry cp
;
1621 BT_DBG("%s", hdev
->name
);
1623 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
1627 memcpy(&cp
.lap
, &ir
->lap
, 3);
1628 cp
.length
= ir
->length
;
1629 cp
.num_rsp
= ir
->num_rsp
;
1630 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
1633 static int wait_inquiry(void *word
)
1636 return signal_pending(current
);
1639 int hci_inquiry(void __user
*arg
)
1641 __u8 __user
*ptr
= arg
;
1642 struct hci_inquiry_req ir
;
1643 struct hci_dev
*hdev
;
1644 int err
= 0, do_inquiry
= 0, max_rsp
;
1648 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
1651 hdev
= hci_dev_get(ir
.dev_id
);
1655 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
1660 if (hdev
->dev_type
!= HCI_BREDR
) {
1665 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1671 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
1672 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
1673 hci_inquiry_cache_flush(hdev
);
1676 hci_dev_unlock(hdev
);
1678 timeo
= ir
.length
* msecs_to_jiffies(2000);
1681 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
1686 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1687 * cleared). If it is interrupted by a signal, return -EINTR.
1689 if (wait_on_bit(&hdev
->flags
, HCI_INQUIRY
, wait_inquiry
,
1690 TASK_INTERRUPTIBLE
))
1694 /* for unlimited number of responses we will use buffer with
1697 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
1699 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1700 * copy it to the user space.
1702 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
1709 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
1710 hci_dev_unlock(hdev
);
1712 BT_DBG("num_rsp %d", ir
.num_rsp
);
1714 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
1716 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
1729 static int hci_dev_do_open(struct hci_dev
*hdev
)
1733 BT_DBG("%s %p", hdev
->name
, hdev
);
1737 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
1742 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1743 /* Check for rfkill but allow the HCI setup stage to
1744 * proceed (which in itself doesn't cause any RF activity).
1746 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
)) {
1751 /* Check for valid public address or a configured static
1752 * random adddress, but let the HCI setup proceed to
1753 * be able to determine if there is a public address
1756 * This check is only valid for BR/EDR controllers
1757 * since AMP controllers do not have an address.
1759 if (hdev
->dev_type
== HCI_BREDR
&&
1760 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
1761 !bacmp(&hdev
->static_addr
, BDADDR_ANY
)) {
1762 ret
= -EADDRNOTAVAIL
;
1767 if (test_bit(HCI_UP
, &hdev
->flags
)) {
1772 if (hdev
->open(hdev
)) {
1777 atomic_set(&hdev
->cmd_cnt
, 1);
1778 set_bit(HCI_INIT
, &hdev
->flags
);
1780 if (hdev
->setup
&& test_bit(HCI_SETUP
, &hdev
->dev_flags
))
1781 ret
= hdev
->setup(hdev
);
1784 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
1785 set_bit(HCI_RAW
, &hdev
->flags
);
1787 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
1788 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
1789 ret
= __hci_init(hdev
);
1792 clear_bit(HCI_INIT
, &hdev
->flags
);
1796 set_bit(HCI_UP
, &hdev
->flags
);
1797 hci_notify(hdev
, HCI_DEV_UP
);
1798 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
1799 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) &&
1800 hdev
->dev_type
== HCI_BREDR
) {
1802 mgmt_powered(hdev
, 1);
1803 hci_dev_unlock(hdev
);
1806 /* Init failed, cleanup */
1807 flush_work(&hdev
->tx_work
);
1808 flush_work(&hdev
->cmd_work
);
1809 flush_work(&hdev
->rx_work
);
1811 skb_queue_purge(&hdev
->cmd_q
);
1812 skb_queue_purge(&hdev
->rx_q
);
1817 if (hdev
->sent_cmd
) {
1818 kfree_skb(hdev
->sent_cmd
);
1819 hdev
->sent_cmd
= NULL
;
1827 hci_req_unlock(hdev
);
1831 /* ---- HCI ioctl helpers ---- */
1833 int hci_dev_open(__u16 dev
)
1835 struct hci_dev
*hdev
;
1838 hdev
= hci_dev_get(dev
);
1842 /* We need to ensure that no other power on/off work is pending
1843 * before proceeding to call hci_dev_do_open. This is
1844 * particularly important if the setup procedure has not yet
1847 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1848 cancel_delayed_work(&hdev
->power_off
);
1850 /* After this call it is guaranteed that the setup procedure
1851 * has finished. This means that error conditions like RFKILL
1852 * or no valid public or static random address apply.
1854 flush_workqueue(hdev
->req_workqueue
);
1856 err
= hci_dev_do_open(hdev
);
1863 static int hci_dev_do_close(struct hci_dev
*hdev
)
1865 BT_DBG("%s %p", hdev
->name
, hdev
);
1867 cancel_delayed_work(&hdev
->power_off
);
1869 hci_req_cancel(hdev
, ENODEV
);
1872 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
1873 del_timer_sync(&hdev
->cmd_timer
);
1874 hci_req_unlock(hdev
);
1878 /* Flush RX and TX works */
1879 flush_work(&hdev
->tx_work
);
1880 flush_work(&hdev
->rx_work
);
1882 if (hdev
->discov_timeout
> 0) {
1883 cancel_delayed_work(&hdev
->discov_off
);
1884 hdev
->discov_timeout
= 0;
1885 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1886 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1889 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1890 cancel_delayed_work(&hdev
->service_cache
);
1892 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
1895 hci_inquiry_cache_flush(hdev
);
1896 hci_conn_hash_flush(hdev
);
1897 hci_dev_unlock(hdev
);
1899 hci_notify(hdev
, HCI_DEV_DOWN
);
1905 skb_queue_purge(&hdev
->cmd_q
);
1906 atomic_set(&hdev
->cmd_cnt
, 1);
1907 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
1908 !test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
1909 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
1910 set_bit(HCI_INIT
, &hdev
->flags
);
1911 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
1912 clear_bit(HCI_INIT
, &hdev
->flags
);
1915 /* flush cmd work */
1916 flush_work(&hdev
->cmd_work
);
1919 skb_queue_purge(&hdev
->rx_q
);
1920 skb_queue_purge(&hdev
->cmd_q
);
1921 skb_queue_purge(&hdev
->raw_q
);
1923 /* Drop last sent command */
1924 if (hdev
->sent_cmd
) {
1925 del_timer_sync(&hdev
->cmd_timer
);
1926 kfree_skb(hdev
->sent_cmd
);
1927 hdev
->sent_cmd
= NULL
;
1930 kfree_skb(hdev
->recv_evt
);
1931 hdev
->recv_evt
= NULL
;
1933 /* After this point our queues are empty
1934 * and no tasks are scheduled. */
1939 hdev
->dev_flags
&= ~HCI_PERSISTENT_MASK
;
1941 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1942 if (hdev
->dev_type
== HCI_BREDR
) {
1944 mgmt_powered(hdev
, 0);
1945 hci_dev_unlock(hdev
);
1949 /* Controller radio is available but is currently powered down */
1950 hdev
->amp_status
= AMP_STATUS_POWERED_DOWN
;
1952 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1953 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
1955 hci_req_unlock(hdev
);
1961 int hci_dev_close(__u16 dev
)
1963 struct hci_dev
*hdev
;
1966 hdev
= hci_dev_get(dev
);
1970 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
1975 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1976 cancel_delayed_work(&hdev
->power_off
);
1978 err
= hci_dev_do_close(hdev
);
1985 int hci_dev_reset(__u16 dev
)
1987 struct hci_dev
*hdev
;
1990 hdev
= hci_dev_get(dev
);
1996 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2001 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2007 skb_queue_purge(&hdev
->rx_q
);
2008 skb_queue_purge(&hdev
->cmd_q
);
2011 hci_inquiry_cache_flush(hdev
);
2012 hci_conn_hash_flush(hdev
);
2013 hci_dev_unlock(hdev
);
2018 atomic_set(&hdev
->cmd_cnt
, 1);
2019 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
2021 if (!test_bit(HCI_RAW
, &hdev
->flags
))
2022 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
2025 hci_req_unlock(hdev
);
2030 int hci_dev_reset_stat(__u16 dev
)
2032 struct hci_dev
*hdev
;
2035 hdev
= hci_dev_get(dev
);
2039 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2044 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
2051 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
2053 struct hci_dev
*hdev
;
2054 struct hci_dev_req dr
;
2057 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
2060 hdev
= hci_dev_get(dr
.dev_id
);
2064 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2069 if (hdev
->dev_type
!= HCI_BREDR
) {
2074 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
2081 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2086 if (!lmp_encrypt_capable(hdev
)) {
2091 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
2092 /* Auth must be enabled first */
2093 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2099 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
2104 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
2109 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
2113 case HCISETLINKMODE
:
2114 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
2115 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
2119 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
2123 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2124 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2128 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2129 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2142 int hci_get_dev_list(void __user
*arg
)
2144 struct hci_dev
*hdev
;
2145 struct hci_dev_list_req
*dl
;
2146 struct hci_dev_req
*dr
;
2147 int n
= 0, size
, err
;
2150 if (get_user(dev_num
, (__u16 __user
*) arg
))
2153 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
2156 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
2158 dl
= kzalloc(size
, GFP_KERNEL
);
2164 read_lock(&hci_dev_list_lock
);
2165 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
2166 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2167 cancel_delayed_work(&hdev
->power_off
);
2169 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2170 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
2172 (dr
+ n
)->dev_id
= hdev
->id
;
2173 (dr
+ n
)->dev_opt
= hdev
->flags
;
2178 read_unlock(&hci_dev_list_lock
);
2181 size
= sizeof(*dl
) + n
* sizeof(*dr
);
2183 err
= copy_to_user(arg
, dl
, size
);
2186 return err
? -EFAULT
: 0;
2189 int hci_get_dev_info(void __user
*arg
)
2191 struct hci_dev
*hdev
;
2192 struct hci_dev_info di
;
2195 if (copy_from_user(&di
, arg
, sizeof(di
)))
2198 hdev
= hci_dev_get(di
.dev_id
);
2202 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2203 cancel_delayed_work_sync(&hdev
->power_off
);
2205 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2206 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
2208 strcpy(di
.name
, hdev
->name
);
2209 di
.bdaddr
= hdev
->bdaddr
;
2210 di
.type
= (hdev
->bus
& 0x0f) | ((hdev
->dev_type
& 0x03) << 4);
2211 di
.flags
= hdev
->flags
;
2212 di
.pkt_type
= hdev
->pkt_type
;
2213 if (lmp_bredr_capable(hdev
)) {
2214 di
.acl_mtu
= hdev
->acl_mtu
;
2215 di
.acl_pkts
= hdev
->acl_pkts
;
2216 di
.sco_mtu
= hdev
->sco_mtu
;
2217 di
.sco_pkts
= hdev
->sco_pkts
;
2219 di
.acl_mtu
= hdev
->le_mtu
;
2220 di
.acl_pkts
= hdev
->le_pkts
;
2224 di
.link_policy
= hdev
->link_policy
;
2225 di
.link_mode
= hdev
->link_mode
;
2227 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
2228 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
2230 if (copy_to_user(arg
, &di
, sizeof(di
)))
2238 /* ---- Interface to HCI drivers ---- */
2240 static int hci_rfkill_set_block(void *data
, bool blocked
)
2242 struct hci_dev
*hdev
= data
;
2244 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
2246 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
2250 set_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2251 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
))
2252 hci_dev_do_close(hdev
);
2254 clear_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2260 static const struct rfkill_ops hci_rfkill_ops
= {
2261 .set_block
= hci_rfkill_set_block
,
2264 static void hci_power_on(struct work_struct
*work
)
2266 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
2269 BT_DBG("%s", hdev
->name
);
2271 err
= hci_dev_do_open(hdev
);
2273 mgmt_set_powered_failed(hdev
, err
);
2277 /* During the HCI setup phase, a few error conditions are
2278 * ignored and they need to be checked now. If they are still
2279 * valid, it is important to turn the device back off.
2281 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
) ||
2282 (hdev
->dev_type
== HCI_BREDR
&&
2283 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2284 !bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
2285 clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
2286 hci_dev_do_close(hdev
);
2287 } else if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
2288 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
2289 HCI_AUTO_OFF_TIMEOUT
);
2292 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
2293 mgmt_index_added(hdev
);
2296 static void hci_power_off(struct work_struct
*work
)
2298 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2301 BT_DBG("%s", hdev
->name
);
2303 hci_dev_do_close(hdev
);
2306 static void hci_discov_off(struct work_struct
*work
)
2308 struct hci_dev
*hdev
;
2310 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
2312 BT_DBG("%s", hdev
->name
);
2314 mgmt_discoverable_timeout(hdev
);
2317 int hci_uuids_clear(struct hci_dev
*hdev
)
2319 struct bt_uuid
*uuid
, *tmp
;
2321 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
2322 list_del(&uuid
->list
);
2329 int hci_link_keys_clear(struct hci_dev
*hdev
)
2331 struct list_head
*p
, *n
;
2333 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
2334 struct link_key
*key
;
2336 key
= list_entry(p
, struct link_key
, list
);
2345 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
2347 struct smp_ltk
*k
, *tmp
;
2349 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
2357 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2361 list_for_each_entry(k
, &hdev
->link_keys
, list
)
2362 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
2368 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2369 u8 key_type
, u8 old_key_type
)
2372 if (key_type
< 0x03)
2375 /* Debug keys are insecure so don't store them persistently */
2376 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
2379 /* Changed combination key and there's no previous one */
2380 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
2383 /* Security mode 3 case */
2387 /* Neither local nor remote side had no-bonding as requirement */
2388 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
2391 /* Local side had dedicated bonding as requirement */
2392 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
2395 /* Remote side had dedicated bonding as requirement */
2396 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
2399 /* If none of the above criteria match, then don't store the key
2404 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
2408 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
2409 if (k
->ediv
!= ediv
||
2410 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
2419 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2424 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
2425 if (addr_type
== k
->bdaddr_type
&&
2426 bacmp(bdaddr
, &k
->bdaddr
) == 0)
2432 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
2433 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
2435 struct link_key
*key
, *old_key
;
2439 old_key
= hci_find_link_key(hdev
, bdaddr
);
2441 old_key_type
= old_key
->type
;
2444 old_key_type
= conn
? conn
->key_type
: 0xff;
2445 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
2448 list_add(&key
->list
, &hdev
->link_keys
);
2451 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
2453 /* Some buggy controller combinations generate a changed
2454 * combination key for legacy pairing even when there's no
2456 if (type
== HCI_LK_CHANGED_COMBINATION
&&
2457 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
2458 type
= HCI_LK_COMBINATION
;
2460 conn
->key_type
= type
;
2463 bacpy(&key
->bdaddr
, bdaddr
);
2464 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
2465 key
->pin_len
= pin_len
;
2467 if (type
== HCI_LK_CHANGED_COMBINATION
)
2468 key
->type
= old_key_type
;
2475 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
2477 mgmt_new_link_key(hdev
, key
, persistent
);
2480 conn
->flush_key
= !persistent
;
2485 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
2486 int new_key
, u8 authenticated
, u8 tk
[16], u8 enc_size
, __le16
2489 struct smp_ltk
*key
, *old_key
;
2491 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
2494 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
2498 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
2501 list_add(&key
->list
, &hdev
->long_term_keys
);
2504 bacpy(&key
->bdaddr
, bdaddr
);
2505 key
->bdaddr_type
= addr_type
;
2506 memcpy(key
->val
, tk
, sizeof(key
->val
));
2507 key
->authenticated
= authenticated
;
2509 key
->enc_size
= enc_size
;
2511 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
2516 if (type
& HCI_SMP_LTK
)
2517 mgmt_new_ltk(hdev
, key
, 1);
2522 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2524 struct link_key
*key
;
2526 key
= hci_find_link_key(hdev
, bdaddr
);
2530 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2532 list_del(&key
->list
);
2538 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2540 struct smp_ltk
*k
, *tmp
;
2542 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
2543 if (bacmp(bdaddr
, &k
->bdaddr
))
2546 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2555 /* HCI command timer function */
2556 static void hci_cmd_timeout(unsigned long arg
)
2558 struct hci_dev
*hdev
= (void *) arg
;
2560 if (hdev
->sent_cmd
) {
2561 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
2562 u16 opcode
= __le16_to_cpu(sent
->opcode
);
2564 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
2566 BT_ERR("%s command tx timeout", hdev
->name
);
2569 atomic_set(&hdev
->cmd_cnt
, 1);
2570 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2573 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
2576 struct oob_data
*data
;
2578 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
2579 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
2585 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2587 struct oob_data
*data
;
2589 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
2593 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2595 list_del(&data
->list
);
2601 int hci_remote_oob_data_clear(struct hci_dev
*hdev
)
2603 struct oob_data
*data
, *n
;
2605 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
2606 list_del(&data
->list
);
2613 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
2616 struct oob_data
*data
;
2618 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
2621 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
2625 bacpy(&data
->bdaddr
, bdaddr
);
2626 list_add(&data
->list
, &hdev
->remote_oob_data
);
2629 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
2630 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
2632 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
2637 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
,
2638 bdaddr_t
*bdaddr
, u8 type
)
2640 struct bdaddr_list
*b
;
2642 list_for_each_entry(b
, &hdev
->blacklist
, list
) {
2643 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
2650 int hci_blacklist_clear(struct hci_dev
*hdev
)
2652 struct list_head
*p
, *n
;
2654 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
2655 struct bdaddr_list
*b
= list_entry(p
, struct bdaddr_list
, list
);
2664 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
2666 struct bdaddr_list
*entry
;
2668 if (!bacmp(bdaddr
, BDADDR_ANY
))
2671 if (hci_blacklist_lookup(hdev
, bdaddr
, type
))
2674 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
2678 bacpy(&entry
->bdaddr
, bdaddr
);
2679 entry
->bdaddr_type
= type
;
2681 list_add(&entry
->list
, &hdev
->blacklist
);
2683 return mgmt_device_blocked(hdev
, bdaddr
, type
);
2686 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
2688 struct bdaddr_list
*entry
;
2690 if (!bacmp(bdaddr
, BDADDR_ANY
))
2691 return hci_blacklist_clear(hdev
);
2693 entry
= hci_blacklist_lookup(hdev
, bdaddr
, type
);
2697 list_del(&entry
->list
);
2700 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
2703 static void inquiry_complete(struct hci_dev
*hdev
, u8 status
)
2706 BT_ERR("Failed to start inquiry: status %d", status
);
2709 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
2710 hci_dev_unlock(hdev
);
2715 static void le_scan_disable_work_complete(struct hci_dev
*hdev
, u8 status
)
2717 /* General inquiry access code (GIAC) */
2718 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
2719 struct hci_request req
;
2720 struct hci_cp_inquiry cp
;
2724 BT_ERR("Failed to disable LE scanning: status %d", status
);
2728 switch (hdev
->discovery
.type
) {
2729 case DISCOV_TYPE_LE
:
2731 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
2732 hci_dev_unlock(hdev
);
2735 case DISCOV_TYPE_INTERLEAVED
:
2736 hci_req_init(&req
, hdev
);
2738 memset(&cp
, 0, sizeof(cp
));
2739 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
2740 cp
.length
= DISCOV_INTERLEAVED_INQUIRY_LEN
;
2741 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
2745 hci_inquiry_cache_flush(hdev
);
2747 err
= hci_req_run(&req
, inquiry_complete
);
2749 BT_ERR("Inquiry request failed: err %d", err
);
2750 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
2753 hci_dev_unlock(hdev
);
2758 static void le_scan_disable_work(struct work_struct
*work
)
2760 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2761 le_scan_disable
.work
);
2762 struct hci_cp_le_set_scan_enable cp
;
2763 struct hci_request req
;
2766 BT_DBG("%s", hdev
->name
);
2768 hci_req_init(&req
, hdev
);
2770 memset(&cp
, 0, sizeof(cp
));
2771 cp
.enable
= LE_SCAN_DISABLE
;
2772 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
2774 err
= hci_req_run(&req
, le_scan_disable_work_complete
);
2776 BT_ERR("Disable LE scanning request failed: err %d", err
);
2779 /* Alloc HCI device */
2780 struct hci_dev
*hci_alloc_dev(void)
2782 struct hci_dev
*hdev
;
2784 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
2788 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
2789 hdev
->esco_type
= (ESCO_HV1
);
2790 hdev
->link_mode
= (HCI_LM_ACCEPT
);
2791 hdev
->num_iac
= 0x01; /* One IAC support is mandatory */
2792 hdev
->io_capability
= 0x03; /* No Input No Output */
2793 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
2794 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
2796 hdev
->sniff_max_interval
= 800;
2797 hdev
->sniff_min_interval
= 80;
2799 hdev
->le_scan_interval
= 0x0060;
2800 hdev
->le_scan_window
= 0x0030;
2801 hdev
->le_conn_min_interval
= 0x0028;
2802 hdev
->le_conn_max_interval
= 0x0038;
2804 mutex_init(&hdev
->lock
);
2805 mutex_init(&hdev
->req_lock
);
2807 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
2808 INIT_LIST_HEAD(&hdev
->blacklist
);
2809 INIT_LIST_HEAD(&hdev
->uuids
);
2810 INIT_LIST_HEAD(&hdev
->link_keys
);
2811 INIT_LIST_HEAD(&hdev
->long_term_keys
);
2812 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
2813 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
2815 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
2816 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
2817 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
2818 INIT_WORK(&hdev
->power_on
, hci_power_on
);
2820 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
2821 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
2822 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
2824 skb_queue_head_init(&hdev
->rx_q
);
2825 skb_queue_head_init(&hdev
->cmd_q
);
2826 skb_queue_head_init(&hdev
->raw_q
);
2828 init_waitqueue_head(&hdev
->req_wait_q
);
2830 setup_timer(&hdev
->cmd_timer
, hci_cmd_timeout
, (unsigned long) hdev
);
2832 hci_init_sysfs(hdev
);
2833 discovery_init(hdev
);
2837 EXPORT_SYMBOL(hci_alloc_dev
);
2839 /* Free HCI device */
2840 void hci_free_dev(struct hci_dev
*hdev
)
2842 /* will free via device release */
2843 put_device(&hdev
->dev
);
2845 EXPORT_SYMBOL(hci_free_dev
);
2847 /* Register HCI device */
2848 int hci_register_dev(struct hci_dev
*hdev
)
2852 if (!hdev
->open
|| !hdev
->close
)
2855 /* Do not allow HCI_AMP devices to register at index 0,
2856 * so the index can be used as the AMP controller ID.
2858 switch (hdev
->dev_type
) {
2860 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
2863 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
2872 sprintf(hdev
->name
, "hci%d", id
);
2875 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
2877 hdev
->workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
2878 WQ_MEM_RECLAIM
, 1, hdev
->name
);
2879 if (!hdev
->workqueue
) {
2884 hdev
->req_workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
2885 WQ_MEM_RECLAIM
, 1, hdev
->name
);
2886 if (!hdev
->req_workqueue
) {
2887 destroy_workqueue(hdev
->workqueue
);
2892 if (!IS_ERR_OR_NULL(bt_debugfs
))
2893 hdev
->debugfs
= debugfs_create_dir(hdev
->name
, bt_debugfs
);
2895 dev_set_name(&hdev
->dev
, "%s", hdev
->name
);
2897 error
= device_add(&hdev
->dev
);
2901 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
2902 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
2905 if (rfkill_register(hdev
->rfkill
) < 0) {
2906 rfkill_destroy(hdev
->rfkill
);
2907 hdev
->rfkill
= NULL
;
2911 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
))
2912 set_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2914 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
2915 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
2917 if (hdev
->dev_type
== HCI_BREDR
) {
2918 /* Assume BR/EDR support until proven otherwise (such as
2919 * through reading supported features during init.
2921 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
2924 write_lock(&hci_dev_list_lock
);
2925 list_add(&hdev
->list
, &hci_dev_list
);
2926 write_unlock(&hci_dev_list_lock
);
2928 hci_notify(hdev
, HCI_DEV_REG
);
2931 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
2936 destroy_workqueue(hdev
->workqueue
);
2937 destroy_workqueue(hdev
->req_workqueue
);
2939 ida_simple_remove(&hci_index_ida
, hdev
->id
);
2943 EXPORT_SYMBOL(hci_register_dev
);
2945 /* Unregister HCI device */
2946 void hci_unregister_dev(struct hci_dev
*hdev
)
2950 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
2952 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
2956 write_lock(&hci_dev_list_lock
);
2957 list_del(&hdev
->list
);
2958 write_unlock(&hci_dev_list_lock
);
2960 hci_dev_do_close(hdev
);
2962 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
2963 kfree_skb(hdev
->reassembly
[i
]);
2965 cancel_work_sync(&hdev
->power_on
);
2967 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
2968 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
2970 mgmt_index_removed(hdev
);
2971 hci_dev_unlock(hdev
);
2974 /* mgmt_index_removed should take care of emptying the
2976 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
2978 hci_notify(hdev
, HCI_DEV_UNREG
);
2981 rfkill_unregister(hdev
->rfkill
);
2982 rfkill_destroy(hdev
->rfkill
);
2985 device_del(&hdev
->dev
);
2987 debugfs_remove_recursive(hdev
->debugfs
);
2989 destroy_workqueue(hdev
->workqueue
);
2990 destroy_workqueue(hdev
->req_workqueue
);
2993 hci_blacklist_clear(hdev
);
2994 hci_uuids_clear(hdev
);
2995 hci_link_keys_clear(hdev
);
2996 hci_smp_ltks_clear(hdev
);
2997 hci_remote_oob_data_clear(hdev
);
2998 hci_dev_unlock(hdev
);
3002 ida_simple_remove(&hci_index_ida
, id
);
3004 EXPORT_SYMBOL(hci_unregister_dev
);
3006 /* Suspend HCI device */
3007 int hci_suspend_dev(struct hci_dev
*hdev
)
3009 hci_notify(hdev
, HCI_DEV_SUSPEND
);
3012 EXPORT_SYMBOL(hci_suspend_dev
);
3014 /* Resume HCI device */
3015 int hci_resume_dev(struct hci_dev
*hdev
)
3017 hci_notify(hdev
, HCI_DEV_RESUME
);
3020 EXPORT_SYMBOL(hci_resume_dev
);
3022 /* Receive frame from HCI drivers */
3023 int hci_recv_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3025 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
3026 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
3032 bt_cb(skb
)->incoming
= 1;
3035 __net_timestamp(skb
);
3037 skb_queue_tail(&hdev
->rx_q
, skb
);
3038 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
3042 EXPORT_SYMBOL(hci_recv_frame
);
3044 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
3045 int count
, __u8 index
)
3050 struct sk_buff
*skb
;
3051 struct bt_skb_cb
*scb
;
3053 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
3054 index
>= NUM_REASSEMBLY
)
3057 skb
= hdev
->reassembly
[index
];
3061 case HCI_ACLDATA_PKT
:
3062 len
= HCI_MAX_FRAME_SIZE
;
3063 hlen
= HCI_ACL_HDR_SIZE
;
3066 len
= HCI_MAX_EVENT_SIZE
;
3067 hlen
= HCI_EVENT_HDR_SIZE
;
3069 case HCI_SCODATA_PKT
:
3070 len
= HCI_MAX_SCO_SIZE
;
3071 hlen
= HCI_SCO_HDR_SIZE
;
3075 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3079 scb
= (void *) skb
->cb
;
3081 scb
->pkt_type
= type
;
3083 hdev
->reassembly
[index
] = skb
;
3087 scb
= (void *) skb
->cb
;
3088 len
= min_t(uint
, scb
->expect
, count
);
3090 memcpy(skb_put(skb
, len
), data
, len
);
3099 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
3100 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
3101 scb
->expect
= h
->plen
;
3103 if (skb_tailroom(skb
) < scb
->expect
) {
3105 hdev
->reassembly
[index
] = NULL
;
3111 case HCI_ACLDATA_PKT
:
3112 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
3113 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
3114 scb
->expect
= __le16_to_cpu(h
->dlen
);
3116 if (skb_tailroom(skb
) < scb
->expect
) {
3118 hdev
->reassembly
[index
] = NULL
;
3124 case HCI_SCODATA_PKT
:
3125 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
3126 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
3127 scb
->expect
= h
->dlen
;
3129 if (skb_tailroom(skb
) < scb
->expect
) {
3131 hdev
->reassembly
[index
] = NULL
;
3138 if (scb
->expect
== 0) {
3139 /* Complete frame */
3141 bt_cb(skb
)->pkt_type
= type
;
3142 hci_recv_frame(hdev
, skb
);
3144 hdev
->reassembly
[index
] = NULL
;
3152 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
3156 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
3160 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
3164 data
+= (count
- rem
);
3170 EXPORT_SYMBOL(hci_recv_fragment
);
3172 #define STREAM_REASSEMBLY 0
3174 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
3180 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
3183 struct { char type
; } *pkt
;
3185 /* Start of the frame */
3192 type
= bt_cb(skb
)->pkt_type
;
3194 rem
= hci_reassembly(hdev
, type
, data
, count
,
3199 data
+= (count
- rem
);
3205 EXPORT_SYMBOL(hci_recv_stream_fragment
);
3207 /* ---- Interface to upper protocols ---- */
3209 int hci_register_cb(struct hci_cb
*cb
)
3211 BT_DBG("%p name %s", cb
, cb
->name
);
3213 write_lock(&hci_cb_list_lock
);
3214 list_add(&cb
->list
, &hci_cb_list
);
3215 write_unlock(&hci_cb_list_lock
);
3219 EXPORT_SYMBOL(hci_register_cb
);
3221 int hci_unregister_cb(struct hci_cb
*cb
)
3223 BT_DBG("%p name %s", cb
, cb
->name
);
3225 write_lock(&hci_cb_list_lock
);
3226 list_del(&cb
->list
);
3227 write_unlock(&hci_cb_list_lock
);
3231 EXPORT_SYMBOL(hci_unregister_cb
);
3233 static void hci_send_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3235 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
3238 __net_timestamp(skb
);
3240 /* Send copy to monitor */
3241 hci_send_to_monitor(hdev
, skb
);
3243 if (atomic_read(&hdev
->promisc
)) {
3244 /* Send copy to the sockets */
3245 hci_send_to_sock(hdev
, skb
);
3248 /* Get rid of skb owner, prior to sending to the driver. */
3251 if (hdev
->send(hdev
, skb
) < 0)
3252 BT_ERR("%s sending frame failed", hdev
->name
);
3255 void hci_req_init(struct hci_request
*req
, struct hci_dev
*hdev
)
3257 skb_queue_head_init(&req
->cmd_q
);
3262 int hci_req_run(struct hci_request
*req
, hci_req_complete_t complete
)
3264 struct hci_dev
*hdev
= req
->hdev
;
3265 struct sk_buff
*skb
;
3266 unsigned long flags
;
3268 BT_DBG("length %u", skb_queue_len(&req
->cmd_q
));
3270 /* If an error occured during request building, remove all HCI
3271 * commands queued on the HCI request queue.
3274 skb_queue_purge(&req
->cmd_q
);
3278 /* Do not allow empty requests */
3279 if (skb_queue_empty(&req
->cmd_q
))
3282 skb
= skb_peek_tail(&req
->cmd_q
);
3283 bt_cb(skb
)->req
.complete
= complete
;
3285 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
3286 skb_queue_splice_tail(&req
->cmd_q
, &hdev
->cmd_q
);
3287 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
3289 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3294 static struct sk_buff
*hci_prepare_cmd(struct hci_dev
*hdev
, u16 opcode
,
3295 u32 plen
, const void *param
)
3297 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
3298 struct hci_command_hdr
*hdr
;
3299 struct sk_buff
*skb
;
3301 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3305 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
3306 hdr
->opcode
= cpu_to_le16(opcode
);
3310 memcpy(skb_put(skb
, plen
), param
, plen
);
3312 BT_DBG("skb len %d", skb
->len
);
3314 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
3319 /* Send HCI command */
3320 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
,
3323 struct sk_buff
*skb
;
3325 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
3327 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
3329 BT_ERR("%s no memory for command", hdev
->name
);
3333 /* Stand-alone HCI commands must be flaged as
3334 * single-command requests.
3336 bt_cb(skb
)->req
.start
= true;
3338 skb_queue_tail(&hdev
->cmd_q
, skb
);
3339 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3344 /* Queue a command to an asynchronous HCI request */
3345 void hci_req_add_ev(struct hci_request
*req
, u16 opcode
, u32 plen
,
3346 const void *param
, u8 event
)
3348 struct hci_dev
*hdev
= req
->hdev
;
3349 struct sk_buff
*skb
;
3351 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
3353 /* If an error occured during request building, there is no point in
3354 * queueing the HCI command. We can simply return.
3359 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
3361 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3362 hdev
->name
, opcode
);
3367 if (skb_queue_empty(&req
->cmd_q
))
3368 bt_cb(skb
)->req
.start
= true;
3370 bt_cb(skb
)->req
.event
= event
;
3372 skb_queue_tail(&req
->cmd_q
, skb
);
3375 void hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
,
3378 hci_req_add_ev(req
, opcode
, plen
, param
, 0);
3381 /* Get data from the previously sent command */
3382 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
3384 struct hci_command_hdr
*hdr
;
3386 if (!hdev
->sent_cmd
)
3389 hdr
= (void *) hdev
->sent_cmd
->data
;
3391 if (hdr
->opcode
!= cpu_to_le16(opcode
))
3394 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
3396 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
3400 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
3402 struct hci_acl_hdr
*hdr
;
3405 skb_push(skb
, HCI_ACL_HDR_SIZE
);
3406 skb_reset_transport_header(skb
);
3407 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
3408 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
3409 hdr
->dlen
= cpu_to_le16(len
);
3412 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
3413 struct sk_buff
*skb
, __u16 flags
)
3415 struct hci_conn
*conn
= chan
->conn
;
3416 struct hci_dev
*hdev
= conn
->hdev
;
3417 struct sk_buff
*list
;
3419 skb
->len
= skb_headlen(skb
);
3422 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
3424 switch (hdev
->dev_type
) {
3426 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3429 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
3432 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
3436 list
= skb_shinfo(skb
)->frag_list
;
3438 /* Non fragmented */
3439 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
3441 skb_queue_tail(queue
, skb
);
3444 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3446 skb_shinfo(skb
)->frag_list
= NULL
;
3448 /* Queue all fragments atomically */
3449 spin_lock(&queue
->lock
);
3451 __skb_queue_tail(queue
, skb
);
3453 flags
&= ~ACL_START
;
3456 skb
= list
; list
= list
->next
;
3458 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
3459 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3461 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3463 __skb_queue_tail(queue
, skb
);
3466 spin_unlock(&queue
->lock
);
3470 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
3472 struct hci_dev
*hdev
= chan
->conn
->hdev
;
3474 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
3476 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
3478 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3482 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
3484 struct hci_dev
*hdev
= conn
->hdev
;
3485 struct hci_sco_hdr hdr
;
3487 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
3489 hdr
.handle
= cpu_to_le16(conn
->handle
);
3490 hdr
.dlen
= skb
->len
;
3492 skb_push(skb
, HCI_SCO_HDR_SIZE
);
3493 skb_reset_transport_header(skb
);
3494 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
3496 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
3498 skb_queue_tail(&conn
->data_q
, skb
);
3499 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3502 /* ---- HCI TX task (outgoing data) ---- */
3504 /* HCI Connection scheduler */
3505 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
3508 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3509 struct hci_conn
*conn
= NULL
, *c
;
3510 unsigned int num
= 0, min
= ~0;
3512 /* We don't have to lock device here. Connections are always
3513 * added and removed with TX task disabled. */
3517 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3518 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
3521 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
3526 if (c
->sent
< min
) {
3531 if (hci_conn_num(hdev
, type
) == num
)
3540 switch (conn
->type
) {
3542 cnt
= hdev
->acl_cnt
;
3546 cnt
= hdev
->sco_cnt
;
3549 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3553 BT_ERR("Unknown link type");
3561 BT_DBG("conn %p quote %d", conn
, *quote
);
3565 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
3567 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3570 BT_ERR("%s link tx timeout", hdev
->name
);
3574 /* Kill stalled connections */
3575 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3576 if (c
->type
== type
&& c
->sent
) {
3577 BT_ERR("%s killing stalled connection %pMR",
3578 hdev
->name
, &c
->dst
);
3579 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
3586 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
3589 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3590 struct hci_chan
*chan
= NULL
;
3591 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
3592 struct hci_conn
*conn
;
3593 int cnt
, q
, conn_num
= 0;
3595 BT_DBG("%s", hdev
->name
);
3599 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3600 struct hci_chan
*tmp
;
3602 if (conn
->type
!= type
)
3605 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3610 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
3611 struct sk_buff
*skb
;
3613 if (skb_queue_empty(&tmp
->data_q
))
3616 skb
= skb_peek(&tmp
->data_q
);
3617 if (skb
->priority
< cur_prio
)
3620 if (skb
->priority
> cur_prio
) {
3623 cur_prio
= skb
->priority
;
3628 if (conn
->sent
< min
) {
3634 if (hci_conn_num(hdev
, type
) == conn_num
)
3643 switch (chan
->conn
->type
) {
3645 cnt
= hdev
->acl_cnt
;
3648 cnt
= hdev
->block_cnt
;
3652 cnt
= hdev
->sco_cnt
;
3655 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3659 BT_ERR("Unknown link type");
3664 BT_DBG("chan %p quote %d", chan
, *quote
);
3668 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
3670 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3671 struct hci_conn
*conn
;
3674 BT_DBG("%s", hdev
->name
);
3678 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3679 struct hci_chan
*chan
;
3681 if (conn
->type
!= type
)
3684 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3689 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
3690 struct sk_buff
*skb
;
3697 if (skb_queue_empty(&chan
->data_q
))
3700 skb
= skb_peek(&chan
->data_q
);
3701 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
3704 skb
->priority
= HCI_PRIO_MAX
- 1;
3706 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
3710 if (hci_conn_num(hdev
, type
) == num
)
3718 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3720 /* Calculate count of blocks used by this packet */
3721 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
3724 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
3726 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
3727 /* ACL tx timeout must be longer than maximum
3728 * link supervision timeout (40.9 seconds) */
3729 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
3730 HCI_ACL_TX_TIMEOUT
))
3731 hci_link_tx_to(hdev
, ACL_LINK
);
3735 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
3737 unsigned int cnt
= hdev
->acl_cnt
;
3738 struct hci_chan
*chan
;
3739 struct sk_buff
*skb
;
3742 __check_timeout(hdev
, cnt
);
3744 while (hdev
->acl_cnt
&&
3745 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
3746 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3747 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
3748 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3749 skb
->len
, skb
->priority
);
3751 /* Stop if priority has changed */
3752 if (skb
->priority
< priority
)
3755 skb
= skb_dequeue(&chan
->data_q
);
3757 hci_conn_enter_active_mode(chan
->conn
,
3758 bt_cb(skb
)->force_active
);
3760 hci_send_frame(hdev
, skb
);
3761 hdev
->acl_last_tx
= jiffies
;
3769 if (cnt
!= hdev
->acl_cnt
)
3770 hci_prio_recalculate(hdev
, ACL_LINK
);
3773 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
3775 unsigned int cnt
= hdev
->block_cnt
;
3776 struct hci_chan
*chan
;
3777 struct sk_buff
*skb
;
3781 __check_timeout(hdev
, cnt
);
3783 BT_DBG("%s", hdev
->name
);
3785 if (hdev
->dev_type
== HCI_AMP
)
3790 while (hdev
->block_cnt
> 0 &&
3791 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
3792 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3793 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
3796 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3797 skb
->len
, skb
->priority
);
3799 /* Stop if priority has changed */
3800 if (skb
->priority
< priority
)
3803 skb
= skb_dequeue(&chan
->data_q
);
3805 blocks
= __get_blocks(hdev
, skb
);
3806 if (blocks
> hdev
->block_cnt
)
3809 hci_conn_enter_active_mode(chan
->conn
,
3810 bt_cb(skb
)->force_active
);
3812 hci_send_frame(hdev
, skb
);
3813 hdev
->acl_last_tx
= jiffies
;
3815 hdev
->block_cnt
-= blocks
;
3818 chan
->sent
+= blocks
;
3819 chan
->conn
->sent
+= blocks
;
3823 if (cnt
!= hdev
->block_cnt
)
3824 hci_prio_recalculate(hdev
, type
);
3827 static void hci_sched_acl(struct hci_dev
*hdev
)
3829 BT_DBG("%s", hdev
->name
);
3831 /* No ACL link over BR/EDR controller */
3832 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
3835 /* No AMP link over AMP controller */
3836 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
3839 switch (hdev
->flow_ctl_mode
) {
3840 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
3841 hci_sched_acl_pkt(hdev
);
3844 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
3845 hci_sched_acl_blk(hdev
);
3851 static void hci_sched_sco(struct hci_dev
*hdev
)
3853 struct hci_conn
*conn
;
3854 struct sk_buff
*skb
;
3857 BT_DBG("%s", hdev
->name
);
3859 if (!hci_conn_num(hdev
, SCO_LINK
))
3862 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
3863 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3864 BT_DBG("skb %p len %d", skb
, skb
->len
);
3865 hci_send_frame(hdev
, skb
);
3868 if (conn
->sent
== ~0)
3874 static void hci_sched_esco(struct hci_dev
*hdev
)
3876 struct hci_conn
*conn
;
3877 struct sk_buff
*skb
;
3880 BT_DBG("%s", hdev
->name
);
3882 if (!hci_conn_num(hdev
, ESCO_LINK
))
3885 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
3887 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
3888 BT_DBG("skb %p len %d", skb
, skb
->len
);
3889 hci_send_frame(hdev
, skb
);
3892 if (conn
->sent
== ~0)
3898 static void hci_sched_le(struct hci_dev
*hdev
)
3900 struct hci_chan
*chan
;
3901 struct sk_buff
*skb
;
3902 int quote
, cnt
, tmp
;
3904 BT_DBG("%s", hdev
->name
);
3906 if (!hci_conn_num(hdev
, LE_LINK
))
3909 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
3910 /* LE tx timeout must be longer than maximum
3911 * link supervision timeout (40.9 seconds) */
3912 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
3913 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
3914 hci_link_tx_to(hdev
, LE_LINK
);
3917 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
3919 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
3920 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
3921 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
3922 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
3923 skb
->len
, skb
->priority
);
3925 /* Stop if priority has changed */
3926 if (skb
->priority
< priority
)
3929 skb
= skb_dequeue(&chan
->data_q
);
3931 hci_send_frame(hdev
, skb
);
3932 hdev
->le_last_tx
= jiffies
;
3943 hdev
->acl_cnt
= cnt
;
3946 hci_prio_recalculate(hdev
, LE_LINK
);
3949 static void hci_tx_work(struct work_struct
*work
)
3951 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
3952 struct sk_buff
*skb
;
3954 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
3955 hdev
->sco_cnt
, hdev
->le_cnt
);
3957 if (!test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
3958 /* Schedule queues and send stuff to HCI driver */
3959 hci_sched_acl(hdev
);
3960 hci_sched_sco(hdev
);
3961 hci_sched_esco(hdev
);
3965 /* Send next queued raw (unknown type) packet */
3966 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
3967 hci_send_frame(hdev
, skb
);
3970 /* ----- HCI RX task (incoming data processing) ----- */
3972 /* ACL data packet */
3973 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3975 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
3976 struct hci_conn
*conn
;
3977 __u16 handle
, flags
;
3979 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
3981 handle
= __le16_to_cpu(hdr
->handle
);
3982 flags
= hci_flags(handle
);
3983 handle
= hci_handle(handle
);
3985 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
3988 hdev
->stat
.acl_rx
++;
3991 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
3992 hci_dev_unlock(hdev
);
3995 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
3997 /* Send to upper protocol */
3998 l2cap_recv_acldata(conn
, skb
, flags
);
4001 BT_ERR("%s ACL packet for unknown connection handle %d",
4002 hdev
->name
, handle
);
4008 /* SCO data packet */
4009 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4011 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
4012 struct hci_conn
*conn
;
4015 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
4017 handle
= __le16_to_cpu(hdr
->handle
);
4019 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
4021 hdev
->stat
.sco_rx
++;
4024 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4025 hci_dev_unlock(hdev
);
4028 /* Send to upper protocol */
4029 sco_recv_scodata(conn
, skb
);
4032 BT_ERR("%s SCO packet for unknown connection handle %d",
4033 hdev
->name
, handle
);
4039 static bool hci_req_is_complete(struct hci_dev
*hdev
)
4041 struct sk_buff
*skb
;
4043 skb
= skb_peek(&hdev
->cmd_q
);
4047 return bt_cb(skb
)->req
.start
;
4050 static void hci_resend_last(struct hci_dev
*hdev
)
4052 struct hci_command_hdr
*sent
;
4053 struct sk_buff
*skb
;
4056 if (!hdev
->sent_cmd
)
4059 sent
= (void *) hdev
->sent_cmd
->data
;
4060 opcode
= __le16_to_cpu(sent
->opcode
);
4061 if (opcode
== HCI_OP_RESET
)
4064 skb
= skb_clone(hdev
->sent_cmd
, GFP_KERNEL
);
4068 skb_queue_head(&hdev
->cmd_q
, skb
);
4069 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
4072 void hci_req_cmd_complete(struct hci_dev
*hdev
, u16 opcode
, u8 status
)
4074 hci_req_complete_t req_complete
= NULL
;
4075 struct sk_buff
*skb
;
4076 unsigned long flags
;
4078 BT_DBG("opcode 0x%04x status 0x%02x", opcode
, status
);
4080 /* If the completed command doesn't match the last one that was
4081 * sent we need to do special handling of it.
4083 if (!hci_sent_cmd_data(hdev
, opcode
)) {
4084 /* Some CSR based controllers generate a spontaneous
4085 * reset complete event during init and any pending
4086 * command will never be completed. In such a case we
4087 * need to resend whatever was the last sent
4090 if (test_bit(HCI_INIT
, &hdev
->flags
) && opcode
== HCI_OP_RESET
)
4091 hci_resend_last(hdev
);
4096 /* If the command succeeded and there's still more commands in
4097 * this request the request is not yet complete.
4099 if (!status
&& !hci_req_is_complete(hdev
))
4102 /* If this was the last command in a request the complete
4103 * callback would be found in hdev->sent_cmd instead of the
4104 * command queue (hdev->cmd_q).
4106 if (hdev
->sent_cmd
) {
4107 req_complete
= bt_cb(hdev
->sent_cmd
)->req
.complete
;
4110 /* We must set the complete callback to NULL to
4111 * avoid calling the callback more than once if
4112 * this function gets called again.
4114 bt_cb(hdev
->sent_cmd
)->req
.complete
= NULL
;
4120 /* Remove all pending commands belonging to this request */
4121 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
4122 while ((skb
= __skb_dequeue(&hdev
->cmd_q
))) {
4123 if (bt_cb(skb
)->req
.start
) {
4124 __skb_queue_head(&hdev
->cmd_q
, skb
);
4128 req_complete
= bt_cb(skb
)->req
.complete
;
4131 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
4135 req_complete(hdev
, status
);
4138 static void hci_rx_work(struct work_struct
*work
)
4140 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
4141 struct sk_buff
*skb
;
4143 BT_DBG("%s", hdev
->name
);
4145 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
4146 /* Send copy to monitor */
4147 hci_send_to_monitor(hdev
, skb
);
4149 if (atomic_read(&hdev
->promisc
)) {
4150 /* Send copy to the sockets */
4151 hci_send_to_sock(hdev
, skb
);
4154 if (test_bit(HCI_RAW
, &hdev
->flags
) ||
4155 test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
4160 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
4161 /* Don't process data packets in this states. */
4162 switch (bt_cb(skb
)->pkt_type
) {
4163 case HCI_ACLDATA_PKT
:
4164 case HCI_SCODATA_PKT
:
4171 switch (bt_cb(skb
)->pkt_type
) {
4173 BT_DBG("%s Event packet", hdev
->name
);
4174 hci_event_packet(hdev
, skb
);
4177 case HCI_ACLDATA_PKT
:
4178 BT_DBG("%s ACL data packet", hdev
->name
);
4179 hci_acldata_packet(hdev
, skb
);
4182 case HCI_SCODATA_PKT
:
4183 BT_DBG("%s SCO data packet", hdev
->name
);
4184 hci_scodata_packet(hdev
, skb
);
4194 static void hci_cmd_work(struct work_struct
*work
)
4196 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
4197 struct sk_buff
*skb
;
4199 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev
->name
,
4200 atomic_read(&hdev
->cmd_cnt
), skb_queue_len(&hdev
->cmd_q
));
4202 /* Send queued commands */
4203 if (atomic_read(&hdev
->cmd_cnt
)) {
4204 skb
= skb_dequeue(&hdev
->cmd_q
);
4208 kfree_skb(hdev
->sent_cmd
);
4210 hdev
->sent_cmd
= skb_clone(skb
, GFP_KERNEL
);
4211 if (hdev
->sent_cmd
) {
4212 atomic_dec(&hdev
->cmd_cnt
);
4213 hci_send_frame(hdev
, skb
);
4214 if (test_bit(HCI_RESET
, &hdev
->flags
))
4215 del_timer(&hdev
->cmd_timer
);
4217 mod_timer(&hdev
->cmd_timer
,
4218 jiffies
+ HCI_CMD_TIMEOUT
);
4220 skb_queue_head(&hdev
->cmd_q
, skb
);
4221 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);