2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
44 static void hci_rx_work(struct work_struct
*work
);
45 static void hci_cmd_work(struct work_struct
*work
);
46 static void hci_tx_work(struct work_struct
*work
);
49 LIST_HEAD(hci_dev_list
);
50 DEFINE_RWLOCK(hci_dev_list_lock
);
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list
);
54 DEFINE_MUTEX(hci_cb_list_lock
);
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida
);
59 /* ----- HCI requests ----- */
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
68 /* ---- HCI notifications ---- */
/* Forward an HCI device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75 /* ---- HCI debugfs entries ---- */
77 static ssize_t
dut_mode_read(struct file
*file
, char __user
*user_buf
,
78 size_t count
, loff_t
*ppos
)
80 struct hci_dev
*hdev
= file
->private_data
;
83 buf
[0] = hci_dev_test_flag(hdev
, HCI_DUT_MODE
) ? 'Y': 'N';
86 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
89 static ssize_t
dut_mode_write(struct file
*file
, const char __user
*user_buf
,
90 size_t count
, loff_t
*ppos
)
92 struct hci_dev
*hdev
= file
->private_data
;
95 size_t buf_size
= min(count
, (sizeof(buf
)-1));
98 if (!test_bit(HCI_UP
, &hdev
->flags
))
101 if (copy_from_user(buf
, user_buf
, buf_size
))
104 buf
[buf_size
] = '\0';
105 if (strtobool(buf
, &enable
))
108 if (enable
== hci_dev_test_flag(hdev
, HCI_DUT_MODE
))
113 skb
= __hci_cmd_sync(hdev
, HCI_OP_ENABLE_DUT_MODE
, 0, NULL
,
116 skb
= __hci_cmd_sync(hdev
, HCI_OP_RESET
, 0, NULL
,
118 hci_req_unlock(hdev
);
125 hci_dev_change_flag(hdev
, HCI_DUT_MODE
);
130 static const struct file_operations dut_mode_fops
= {
132 .read
= dut_mode_read
,
133 .write
= dut_mode_write
,
134 .llseek
= default_llseek
,
137 static ssize_t
vendor_diag_read(struct file
*file
, char __user
*user_buf
,
138 size_t count
, loff_t
*ppos
)
140 struct hci_dev
*hdev
= file
->private_data
;
143 buf
[0] = hci_dev_test_flag(hdev
, HCI_VENDOR_DIAG
) ? 'Y': 'N';
146 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
149 static ssize_t
vendor_diag_write(struct file
*file
, const char __user
*user_buf
,
150 size_t count
, loff_t
*ppos
)
152 struct hci_dev
*hdev
= file
->private_data
;
154 size_t buf_size
= min(count
, (sizeof(buf
)-1));
158 if (copy_from_user(buf
, user_buf
, buf_size
))
161 buf
[buf_size
] = '\0';
162 if (strtobool(buf
, &enable
))
165 /* When the diagnostic flags are not persistent and the transport
166 * is not active, then there is no need for the vendor callback.
168 * Instead just store the desired value. If needed the setting
169 * will be programmed when the controller gets powered on.
171 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG
, &hdev
->quirks
) &&
172 !test_bit(HCI_RUNNING
, &hdev
->flags
))
176 err
= hdev
->set_diag(hdev
, enable
);
177 hci_req_unlock(hdev
);
184 hci_dev_set_flag(hdev
, HCI_VENDOR_DIAG
);
186 hci_dev_clear_flag(hdev
, HCI_VENDOR_DIAG
);
191 static const struct file_operations vendor_diag_fops
= {
193 .read
= vendor_diag_read
,
194 .write
= vendor_diag_write
,
195 .llseek
= default_llseek
,
198 static void hci_debugfs_create_basic(struct hci_dev
*hdev
)
200 debugfs_create_file("dut_mode", 0644, hdev
->debugfs
, hdev
,
204 debugfs_create_file("vendor_diag", 0644, hdev
->debugfs
, hdev
,
208 /* ---- HCI requests ---- */
210 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
, u16 opcode
,
213 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
215 if (hdev
->req_status
== HCI_REQ_PEND
) {
216 hdev
->req_result
= result
;
217 hdev
->req_status
= HCI_REQ_DONE
;
219 hdev
->req_skb
= skb_get(skb
);
220 wake_up_interruptible(&hdev
->req_wait_q
);
224 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
226 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
228 if (hdev
->req_status
== HCI_REQ_PEND
) {
229 hdev
->req_result
= err
;
230 hdev
->req_status
= HCI_REQ_CANCELED
;
231 wake_up_interruptible(&hdev
->req_wait_q
);
235 struct sk_buff
*__hci_cmd_sync_ev(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
236 const void *param
, u8 event
, u32 timeout
)
238 DECLARE_WAITQUEUE(wait
, current
);
239 struct hci_request req
;
243 BT_DBG("%s", hdev
->name
);
245 hci_req_init(&req
, hdev
);
247 hci_req_add_ev(&req
, opcode
, plen
, param
, event
);
249 hdev
->req_status
= HCI_REQ_PEND
;
251 add_wait_queue(&hdev
->req_wait_q
, &wait
);
252 set_current_state(TASK_INTERRUPTIBLE
);
254 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
256 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
257 set_current_state(TASK_RUNNING
);
261 schedule_timeout(timeout
);
263 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
265 if (signal_pending(current
))
266 return ERR_PTR(-EINTR
);
268 switch (hdev
->req_status
) {
270 err
= -bt_to_errno(hdev
->req_result
);
273 case HCI_REQ_CANCELED
:
274 err
= -hdev
->req_result
;
282 hdev
->req_status
= hdev
->req_result
= 0;
284 hdev
->req_skb
= NULL
;
286 BT_DBG("%s end: err %d", hdev
->name
, err
);
294 return ERR_PTR(-ENODATA
);
298 EXPORT_SYMBOL(__hci_cmd_sync_ev
);
300 struct sk_buff
*__hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
301 const void *param
, u32 timeout
)
303 return __hci_cmd_sync_ev(hdev
, opcode
, plen
, param
, 0, timeout
);
305 EXPORT_SYMBOL(__hci_cmd_sync
);
307 /* Execute request and wait for completion. */
308 static int __hci_req_sync(struct hci_dev
*hdev
,
309 void (*func
)(struct hci_request
*req
,
311 unsigned long opt
, __u32 timeout
)
313 struct hci_request req
;
314 DECLARE_WAITQUEUE(wait
, current
);
317 BT_DBG("%s start", hdev
->name
);
319 hci_req_init(&req
, hdev
);
321 hdev
->req_status
= HCI_REQ_PEND
;
325 add_wait_queue(&hdev
->req_wait_q
, &wait
);
326 set_current_state(TASK_INTERRUPTIBLE
);
328 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
330 hdev
->req_status
= 0;
332 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
333 set_current_state(TASK_RUNNING
);
335 /* ENODATA means the HCI request command queue is empty.
336 * This can happen when a request with conditionals doesn't
337 * trigger any commands to be sent. This is normal behavior
338 * and should not trigger an error return.
346 schedule_timeout(timeout
);
348 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
350 if (signal_pending(current
))
353 switch (hdev
->req_status
) {
355 err
= -bt_to_errno(hdev
->req_result
);
358 case HCI_REQ_CANCELED
:
359 err
= -hdev
->req_result
;
367 hdev
->req_status
= hdev
->req_result
= 0;
369 BT_DBG("%s end: err %d", hdev
->name
, err
);
374 static int hci_req_sync(struct hci_dev
*hdev
,
375 void (*req
)(struct hci_request
*req
,
377 unsigned long opt
, __u32 timeout
)
381 if (!test_bit(HCI_UP
, &hdev
->flags
))
384 /* Serialize all requests */
386 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
);
387 hci_req_unlock(hdev
);
392 static void hci_reset_req(struct hci_request
*req
, unsigned long opt
)
394 BT_DBG("%s %ld", req
->hdev
->name
, opt
);
397 set_bit(HCI_RESET
, &req
->hdev
->flags
);
398 hci_req_add(req
, HCI_OP_RESET
, 0, NULL
);
401 static void bredr_init(struct hci_request
*req
)
403 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
405 /* Read Local Supported Features */
406 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
408 /* Read Local Version */
409 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
411 /* Read BD Address */
412 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
415 static void amp_init1(struct hci_request
*req
)
417 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
419 /* Read Local Version */
420 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
422 /* Read Local Supported Commands */
423 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
425 /* Read Local AMP Info */
426 hci_req_add(req
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
428 /* Read Data Blk size */
429 hci_req_add(req
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
431 /* Read Flow Control Mode */
432 hci_req_add(req
, HCI_OP_READ_FLOW_CONTROL_MODE
, 0, NULL
);
434 /* Read Location Data */
435 hci_req_add(req
, HCI_OP_READ_LOCATION_DATA
, 0, NULL
);
438 static void amp_init2(struct hci_request
*req
)
440 /* Read Local Supported Features. Not all AMP controllers
441 * support this so it's placed conditionally in the second
444 if (req
->hdev
->commands
[14] & 0x20)
445 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
448 static void hci_init1_req(struct hci_request
*req
, unsigned long opt
)
450 struct hci_dev
*hdev
= req
->hdev
;
452 BT_DBG("%s %ld", hdev
->name
, opt
);
455 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
456 hci_reset_req(req
, 0);
458 switch (hdev
->dev_type
) {
468 BT_ERR("Unknown device type %d", hdev
->dev_type
);
473 static void bredr_setup(struct hci_request
*req
)
478 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
479 hci_req_add(req
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
481 /* Read Class of Device */
482 hci_req_add(req
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
484 /* Read Local Name */
485 hci_req_add(req
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
487 /* Read Voice Setting */
488 hci_req_add(req
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
490 /* Read Number of Supported IAC */
491 hci_req_add(req
, HCI_OP_READ_NUM_SUPPORTED_IAC
, 0, NULL
);
493 /* Read Current IAC LAP */
494 hci_req_add(req
, HCI_OP_READ_CURRENT_IAC_LAP
, 0, NULL
);
496 /* Clear Event Filters */
497 flt_type
= HCI_FLT_CLEAR_ALL
;
498 hci_req_add(req
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
500 /* Connection accept timeout ~20 secs */
501 param
= cpu_to_le16(0x7d00);
502 hci_req_add(req
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
505 static void le_setup(struct hci_request
*req
)
507 struct hci_dev
*hdev
= req
->hdev
;
509 /* Read LE Buffer Size */
510 hci_req_add(req
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
512 /* Read LE Local Supported Features */
513 hci_req_add(req
, HCI_OP_LE_READ_LOCAL_FEATURES
, 0, NULL
);
515 /* Read LE Supported States */
516 hci_req_add(req
, HCI_OP_LE_READ_SUPPORTED_STATES
, 0, NULL
);
518 /* Read LE White List Size */
519 hci_req_add(req
, HCI_OP_LE_READ_WHITE_LIST_SIZE
, 0, NULL
);
521 /* Clear LE White List */
522 hci_req_add(req
, HCI_OP_LE_CLEAR_WHITE_LIST
, 0, NULL
);
524 /* LE-only controllers have LE implicitly enabled */
525 if (!lmp_bredr_capable(hdev
))
526 hci_dev_set_flag(hdev
, HCI_LE_ENABLED
);
529 static void hci_setup_event_mask(struct hci_request
*req
)
531 struct hci_dev
*hdev
= req
->hdev
;
533 /* The second byte is 0xff instead of 0x9f (two reserved bits
534 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
537 u8 events
[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
539 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
540 * any event mask for pre 1.2 devices.
542 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
545 if (lmp_bredr_capable(hdev
)) {
546 events
[4] |= 0x01; /* Flow Specification Complete */
547 events
[4] |= 0x02; /* Inquiry Result with RSSI */
548 events
[4] |= 0x04; /* Read Remote Extended Features Complete */
549 events
[5] |= 0x08; /* Synchronous Connection Complete */
550 events
[5] |= 0x10; /* Synchronous Connection Changed */
552 /* Use a different default for LE-only devices */
553 memset(events
, 0, sizeof(events
));
554 events
[0] |= 0x10; /* Disconnection Complete */
555 events
[1] |= 0x08; /* Read Remote Version Information Complete */
556 events
[1] |= 0x20; /* Command Complete */
557 events
[1] |= 0x40; /* Command Status */
558 events
[1] |= 0x80; /* Hardware Error */
559 events
[2] |= 0x04; /* Number of Completed Packets */
560 events
[3] |= 0x02; /* Data Buffer Overflow */
562 if (hdev
->le_features
[0] & HCI_LE_ENCRYPTION
) {
563 events
[0] |= 0x80; /* Encryption Change */
564 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
568 if (lmp_inq_rssi_capable(hdev
))
569 events
[4] |= 0x02; /* Inquiry Result with RSSI */
571 if (lmp_sniffsubr_capable(hdev
))
572 events
[5] |= 0x20; /* Sniff Subrating */
574 if (lmp_pause_enc_capable(hdev
))
575 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
577 if (lmp_ext_inq_capable(hdev
))
578 events
[5] |= 0x40; /* Extended Inquiry Result */
580 if (lmp_no_flush_capable(hdev
))
581 events
[7] |= 0x01; /* Enhanced Flush Complete */
583 if (lmp_lsto_capable(hdev
))
584 events
[6] |= 0x80; /* Link Supervision Timeout Changed */
586 if (lmp_ssp_capable(hdev
)) {
587 events
[6] |= 0x01; /* IO Capability Request */
588 events
[6] |= 0x02; /* IO Capability Response */
589 events
[6] |= 0x04; /* User Confirmation Request */
590 events
[6] |= 0x08; /* User Passkey Request */
591 events
[6] |= 0x10; /* Remote OOB Data Request */
592 events
[6] |= 0x20; /* Simple Pairing Complete */
593 events
[7] |= 0x04; /* User Passkey Notification */
594 events
[7] |= 0x08; /* Keypress Notification */
595 events
[7] |= 0x10; /* Remote Host Supported
596 * Features Notification
600 if (lmp_le_capable(hdev
))
601 events
[7] |= 0x20; /* LE Meta-Event */
603 hci_req_add(req
, HCI_OP_SET_EVENT_MASK
, sizeof(events
), events
);
606 static void hci_init2_req(struct hci_request
*req
, unsigned long opt
)
608 struct hci_dev
*hdev
= req
->hdev
;
610 if (hdev
->dev_type
== HCI_AMP
)
611 return amp_init2(req
);
613 if (lmp_bredr_capable(hdev
))
616 hci_dev_clear_flag(hdev
, HCI_BREDR_ENABLED
);
618 if (lmp_le_capable(hdev
))
621 /* All Bluetooth 1.2 and later controllers should support the
622 * HCI command for reading the local supported commands.
624 * Unfortunately some controllers indicate Bluetooth 1.2 support,
625 * but do not have support for this command. If that is the case,
626 * the driver can quirk the behavior and skip reading the local
627 * supported commands.
629 if (hdev
->hci_ver
> BLUETOOTH_VER_1_1
&&
630 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS
, &hdev
->quirks
))
631 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
633 if (lmp_ssp_capable(hdev
)) {
634 /* When SSP is available, then the host features page
635 * should also be available as well. However some
636 * controllers list the max_page as 0 as long as SSP
637 * has not been enabled. To achieve proper debugging
638 * output, force the minimum max_page to 1 at least.
640 hdev
->max_page
= 0x01;
642 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
645 hci_req_add(req
, HCI_OP_WRITE_SSP_MODE
,
646 sizeof(mode
), &mode
);
648 struct hci_cp_write_eir cp
;
650 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
651 memset(&cp
, 0, sizeof(cp
));
653 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
657 if (lmp_inq_rssi_capable(hdev
) ||
658 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE
, &hdev
->quirks
)) {
661 /* If Extended Inquiry Result events are supported, then
662 * they are clearly preferred over Inquiry Result with RSSI
665 mode
= lmp_ext_inq_capable(hdev
) ? 0x02 : 0x01;
667 hci_req_add(req
, HCI_OP_WRITE_INQUIRY_MODE
, 1, &mode
);
670 if (lmp_inq_tx_pwr_capable(hdev
))
671 hci_req_add(req
, HCI_OP_READ_INQ_RSP_TX_POWER
, 0, NULL
);
673 if (lmp_ext_feat_capable(hdev
)) {
674 struct hci_cp_read_local_ext_features cp
;
677 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
681 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
683 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(enable
),
688 static void hci_setup_link_policy(struct hci_request
*req
)
690 struct hci_dev
*hdev
= req
->hdev
;
691 struct hci_cp_write_def_link_policy cp
;
694 if (lmp_rswitch_capable(hdev
))
695 link_policy
|= HCI_LP_RSWITCH
;
696 if (lmp_hold_capable(hdev
))
697 link_policy
|= HCI_LP_HOLD
;
698 if (lmp_sniff_capable(hdev
))
699 link_policy
|= HCI_LP_SNIFF
;
700 if (lmp_park_capable(hdev
))
701 link_policy
|= HCI_LP_PARK
;
703 cp
.policy
= cpu_to_le16(link_policy
);
704 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, sizeof(cp
), &cp
);
707 static void hci_set_le_support(struct hci_request
*req
)
709 struct hci_dev
*hdev
= req
->hdev
;
710 struct hci_cp_write_le_host_supported cp
;
712 /* LE-only devices do not support explicit enablement */
713 if (!lmp_bredr_capable(hdev
))
716 memset(&cp
, 0, sizeof(cp
));
718 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
723 if (cp
.le
!= lmp_host_le_capable(hdev
))
724 hci_req_add(req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(cp
),
728 static void hci_set_event_mask_page_2(struct hci_request
*req
)
730 struct hci_dev
*hdev
= req
->hdev
;
731 u8 events
[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
733 /* If Connectionless Slave Broadcast master role is supported
734 * enable all necessary events for it.
736 if (lmp_csb_master_capable(hdev
)) {
737 events
[1] |= 0x40; /* Triggered Clock Capture */
738 events
[1] |= 0x80; /* Synchronization Train Complete */
739 events
[2] |= 0x10; /* Slave Page Response Timeout */
740 events
[2] |= 0x20; /* CSB Channel Map Change */
743 /* If Connectionless Slave Broadcast slave role is supported
744 * enable all necessary events for it.
746 if (lmp_csb_slave_capable(hdev
)) {
747 events
[2] |= 0x01; /* Synchronization Train Received */
748 events
[2] |= 0x02; /* CSB Receive */
749 events
[2] |= 0x04; /* CSB Timeout */
750 events
[2] |= 0x08; /* Truncated Page Complete */
753 /* Enable Authenticated Payload Timeout Expired event if supported */
754 if (lmp_ping_capable(hdev
) || hdev
->le_features
[0] & HCI_LE_PING
)
757 hci_req_add(req
, HCI_OP_SET_EVENT_MASK_PAGE_2
, sizeof(events
), events
);
760 static void hci_init3_req(struct hci_request
*req
, unsigned long opt
)
762 struct hci_dev
*hdev
= req
->hdev
;
765 hci_setup_event_mask(req
);
767 if (hdev
->commands
[6] & 0x20 &&
768 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY
, &hdev
->quirks
)) {
769 struct hci_cp_read_stored_link_key cp
;
771 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
773 hci_req_add(req
, HCI_OP_READ_STORED_LINK_KEY
, sizeof(cp
), &cp
);
776 if (hdev
->commands
[5] & 0x10)
777 hci_setup_link_policy(req
);
779 if (hdev
->commands
[8] & 0x01)
780 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_ACTIVITY
, 0, NULL
);
782 /* Some older Broadcom based Bluetooth 1.2 controllers do not
783 * support the Read Page Scan Type command. Check support for
784 * this command in the bit mask of supported commands.
786 if (hdev
->commands
[13] & 0x01)
787 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_TYPE
, 0, NULL
);
789 if (lmp_le_capable(hdev
)) {
792 memset(events
, 0, sizeof(events
));
795 if (hdev
->le_features
[0] & HCI_LE_ENCRYPTION
)
796 events
[0] |= 0x10; /* LE Long Term Key Request */
798 /* If controller supports the Connection Parameters Request
799 * Link Layer Procedure, enable the corresponding event.
801 if (hdev
->le_features
[0] & HCI_LE_CONN_PARAM_REQ_PROC
)
802 events
[0] |= 0x20; /* LE Remote Connection
806 /* If the controller supports the Data Length Extension
807 * feature, enable the corresponding event.
809 if (hdev
->le_features
[0] & HCI_LE_DATA_LEN_EXT
)
810 events
[0] |= 0x40; /* LE Data Length Change */
812 /* If the controller supports Extended Scanner Filter
813 * Policies, enable the correspondig event.
815 if (hdev
->le_features
[0] & HCI_LE_EXT_SCAN_POLICY
)
816 events
[1] |= 0x04; /* LE Direct Advertising
820 /* If the controller supports the LE Read Local P-256
821 * Public Key command, enable the corresponding event.
823 if (hdev
->commands
[34] & 0x02)
824 events
[0] |= 0x80; /* LE Read Local P-256
825 * Public Key Complete
828 /* If the controller supports the LE Generate DHKey
829 * command, enable the corresponding event.
831 if (hdev
->commands
[34] & 0x04)
832 events
[1] |= 0x01; /* LE Generate DHKey Complete */
834 hci_req_add(req
, HCI_OP_LE_SET_EVENT_MASK
, sizeof(events
),
837 if (hdev
->commands
[25] & 0x40) {
838 /* Read LE Advertising Channel TX Power */
839 hci_req_add(req
, HCI_OP_LE_READ_ADV_TX_POWER
, 0, NULL
);
842 if (hdev
->le_features
[0] & HCI_LE_DATA_LEN_EXT
) {
843 /* Read LE Maximum Data Length */
844 hci_req_add(req
, HCI_OP_LE_READ_MAX_DATA_LEN
, 0, NULL
);
846 /* Read LE Suggested Default Data Length */
847 hci_req_add(req
, HCI_OP_LE_READ_DEF_DATA_LEN
, 0, NULL
);
850 hci_set_le_support(req
);
853 /* Read features beyond page 1 if available */
854 for (p
= 2; p
< HCI_MAX_PAGES
&& p
<= hdev
->max_page
; p
++) {
855 struct hci_cp_read_local_ext_features cp
;
858 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
863 static void hci_init4_req(struct hci_request
*req
, unsigned long opt
)
865 struct hci_dev
*hdev
= req
->hdev
;
867 /* Some Broadcom based Bluetooth controllers do not support the
868 * Delete Stored Link Key command. They are clearly indicating its
869 * absence in the bit mask of supported commands.
871 * Check the supported commands and only if the the command is marked
872 * as supported send it. If not supported assume that the controller
873 * does not have actual support for stored link keys which makes this
874 * command redundant anyway.
876 * Some controllers indicate that they support handling deleting
877 * stored link keys, but they don't. The quirk lets a driver
878 * just disable this command.
880 if (hdev
->commands
[6] & 0x80 &&
881 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY
, &hdev
->quirks
)) {
882 struct hci_cp_delete_stored_link_key cp
;
884 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
885 cp
.delete_all
= 0x01;
886 hci_req_add(req
, HCI_OP_DELETE_STORED_LINK_KEY
,
890 /* Set event mask page 2 if the HCI command for it is supported */
891 if (hdev
->commands
[22] & 0x04)
892 hci_set_event_mask_page_2(req
);
894 /* Read local codec list if the HCI command is supported */
895 if (hdev
->commands
[29] & 0x20)
896 hci_req_add(req
, HCI_OP_READ_LOCAL_CODECS
, 0, NULL
);
898 /* Get MWS transport configuration if the HCI command is supported */
899 if (hdev
->commands
[30] & 0x08)
900 hci_req_add(req
, HCI_OP_GET_MWS_TRANSPORT_CONFIG
, 0, NULL
);
902 /* Check for Synchronization Train support */
903 if (lmp_sync_train_capable(hdev
))
904 hci_req_add(req
, HCI_OP_READ_SYNC_TRAIN_PARAMS
, 0, NULL
);
906 /* Enable Secure Connections if supported and configured */
907 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
) &&
908 bredr_sc_enabled(hdev
)) {
911 hci_req_add(req
, HCI_OP_WRITE_SC_SUPPORT
,
912 sizeof(support
), &support
);
916 static int __hci_init(struct hci_dev
*hdev
)
920 err
= __hci_req_sync(hdev
, hci_init1_req
, 0, HCI_INIT_TIMEOUT
);
924 if (hci_dev_test_flag(hdev
, HCI_SETUP
))
925 hci_debugfs_create_basic(hdev
);
927 err
= __hci_req_sync(hdev
, hci_init2_req
, 0, HCI_INIT_TIMEOUT
);
931 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
932 * BR/EDR/LE type controllers. AMP controllers only need the
933 * first two stages of init.
935 if (hdev
->dev_type
!= HCI_BREDR
)
938 err
= __hci_req_sync(hdev
, hci_init3_req
, 0, HCI_INIT_TIMEOUT
);
942 err
= __hci_req_sync(hdev
, hci_init4_req
, 0, HCI_INIT_TIMEOUT
);
946 /* This function is only called when the controller is actually in
947 * configured state. When the controller is marked as unconfigured,
948 * this initialization procedure is not run.
950 * It means that it is possible that a controller runs through its
951 * setup phase and then discovers missing settings. If that is the
952 * case, then this function will not be called. It then will only
953 * be called during the config phase.
955 * So only when in setup phase or config phase, create the debugfs
956 * entries and register the SMP channels.
958 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
959 !hci_dev_test_flag(hdev
, HCI_CONFIG
))
962 hci_debugfs_create_common(hdev
);
964 if (lmp_bredr_capable(hdev
))
965 hci_debugfs_create_bredr(hdev
);
967 if (lmp_le_capable(hdev
))
968 hci_debugfs_create_le(hdev
);
973 static void hci_init0_req(struct hci_request
*req
, unsigned long opt
)
975 struct hci_dev
*hdev
= req
->hdev
;
977 BT_DBG("%s %ld", hdev
->name
, opt
);
980 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
981 hci_reset_req(req
, 0);
983 /* Read Local Version */
984 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
986 /* Read BD Address */
987 if (hdev
->set_bdaddr
)
988 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
991 static int __hci_unconf_init(struct hci_dev
*hdev
)
995 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
998 err
= __hci_req_sync(hdev
, hci_init0_req
, 0, HCI_INIT_TIMEOUT
);
1002 if (hci_dev_test_flag(hdev
, HCI_SETUP
))
1003 hci_debugfs_create_basic(hdev
);
1008 static void hci_scan_req(struct hci_request
*req
, unsigned long opt
)
1012 BT_DBG("%s %x", req
->hdev
->name
, scan
);
1014 /* Inquiry and Page scans */
1015 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1018 static void hci_auth_req(struct hci_request
*req
, unsigned long opt
)
1022 BT_DBG("%s %x", req
->hdev
->name
, auth
);
1024 /* Authentication */
1025 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
1028 static void hci_encrypt_req(struct hci_request
*req
, unsigned long opt
)
1032 BT_DBG("%s %x", req
->hdev
->name
, encrypt
);
1035 hci_req_add(req
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
1038 static void hci_linkpol_req(struct hci_request
*req
, unsigned long opt
)
1040 __le16 policy
= cpu_to_le16(opt
);
1042 BT_DBG("%s %x", req
->hdev
->name
, policy
);
1044 /* Default link policy */
1045 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
1048 /* Get HCI device by index.
1049 * Device is held on return. */
1050 struct hci_dev
*hci_dev_get(int index
)
1052 struct hci_dev
*hdev
= NULL
, *d
;
1054 BT_DBG("%d", index
);
1059 read_lock(&hci_dev_list_lock
);
1060 list_for_each_entry(d
, &hci_dev_list
, list
) {
1061 if (d
->id
== index
) {
1062 hdev
= hci_dev_hold(d
);
1066 read_unlock(&hci_dev_list_lock
);
1070 /* ---- Inquiry support ---- */
1072 bool hci_discovery_active(struct hci_dev
*hdev
)
1074 struct discovery_state
*discov
= &hdev
->discovery
;
1076 switch (discov
->state
) {
1077 case DISCOVERY_FINDING
:
1078 case DISCOVERY_RESOLVING
:
1086 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
1088 int old_state
= hdev
->discovery
.state
;
1090 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
1092 if (old_state
== state
)
1095 hdev
->discovery
.state
= state
;
1098 case DISCOVERY_STOPPED
:
1099 hci_update_background_scan(hdev
);
1101 if (old_state
!= DISCOVERY_STARTING
)
1102 mgmt_discovering(hdev
, 0);
1104 case DISCOVERY_STARTING
:
1106 case DISCOVERY_FINDING
:
1107 mgmt_discovering(hdev
, 1);
1109 case DISCOVERY_RESOLVING
:
1111 case DISCOVERY_STOPPING
:
1116 void hci_inquiry_cache_flush(struct hci_dev
*hdev
)
1118 struct discovery_state
*cache
= &hdev
->discovery
;
1119 struct inquiry_entry
*p
, *n
;
1121 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
1126 INIT_LIST_HEAD(&cache
->unknown
);
1127 INIT_LIST_HEAD(&cache
->resolve
);
1130 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
1133 struct discovery_state
*cache
= &hdev
->discovery
;
1134 struct inquiry_entry
*e
;
1136 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1138 list_for_each_entry(e
, &cache
->all
, all
) {
1139 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1146 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
1149 struct discovery_state
*cache
= &hdev
->discovery
;
1150 struct inquiry_entry
*e
;
1152 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1154 list_for_each_entry(e
, &cache
->unknown
, list
) {
1155 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1162 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
1166 struct discovery_state
*cache
= &hdev
->discovery
;
1167 struct inquiry_entry
*e
;
1169 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
1171 list_for_each_entry(e
, &cache
->resolve
, list
) {
1172 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
1174 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1181 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
1182 struct inquiry_entry
*ie
)
1184 struct discovery_state
*cache
= &hdev
->discovery
;
1185 struct list_head
*pos
= &cache
->resolve
;
1186 struct inquiry_entry
*p
;
1188 list_del(&ie
->list
);
1190 list_for_each_entry(p
, &cache
->resolve
, list
) {
1191 if (p
->name_state
!= NAME_PENDING
&&
1192 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
1197 list_add(&ie
->list
, pos
);
1200 u32
hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
1203 struct discovery_state
*cache
= &hdev
->discovery
;
1204 struct inquiry_entry
*ie
;
1207 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
1209 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
, BDADDR_BREDR
);
1211 if (!data
->ssp_mode
)
1212 flags
|= MGMT_DEV_FOUND_LEGACY_PAIRING
;
1214 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
1216 if (!ie
->data
.ssp_mode
)
1217 flags
|= MGMT_DEV_FOUND_LEGACY_PAIRING
;
1219 if (ie
->name_state
== NAME_NEEDED
&&
1220 data
->rssi
!= ie
->data
.rssi
) {
1221 ie
->data
.rssi
= data
->rssi
;
1222 hci_inquiry_cache_update_resolve(hdev
, ie
);
1228 /* Entry not in the cache. Add new one. */
1229 ie
= kzalloc(sizeof(*ie
), GFP_KERNEL
);
1231 flags
|= MGMT_DEV_FOUND_CONFIRM_NAME
;
1235 list_add(&ie
->all
, &cache
->all
);
1238 ie
->name_state
= NAME_KNOWN
;
1240 ie
->name_state
= NAME_NOT_KNOWN
;
1241 list_add(&ie
->list
, &cache
->unknown
);
1245 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
1246 ie
->name_state
!= NAME_PENDING
) {
1247 ie
->name_state
= NAME_KNOWN
;
1248 list_del(&ie
->list
);
1251 memcpy(&ie
->data
, data
, sizeof(*data
));
1252 ie
->timestamp
= jiffies
;
1253 cache
->timestamp
= jiffies
;
1255 if (ie
->name_state
== NAME_NOT_KNOWN
)
1256 flags
|= MGMT_DEV_FOUND_CONFIRM_NAME
;
1262 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
1264 struct discovery_state
*cache
= &hdev
->discovery
;
1265 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
1266 struct inquiry_entry
*e
;
1269 list_for_each_entry(e
, &cache
->all
, all
) {
1270 struct inquiry_data
*data
= &e
->data
;
1275 bacpy(&info
->bdaddr
, &data
->bdaddr
);
1276 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
1277 info
->pscan_period_mode
= data
->pscan_period_mode
;
1278 info
->pscan_mode
= data
->pscan_mode
;
1279 memcpy(info
->dev_class
, data
->dev_class
, 3);
1280 info
->clock_offset
= data
->clock_offset
;
1286 BT_DBG("cache %p, copied %d", cache
, copied
);
1290 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
1292 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
1293 struct hci_dev
*hdev
= req
->hdev
;
1294 struct hci_cp_inquiry cp
;
1296 BT_DBG("%s", hdev
->name
);
1298 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
1302 memcpy(&cp
.lap
, &ir
->lap
, 3);
1303 cp
.length
= ir
->length
;
1304 cp
.num_rsp
= ir
->num_rsp
;
1305 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
1308 int hci_inquiry(void __user
*arg
)
1310 __u8 __user
*ptr
= arg
;
1311 struct hci_inquiry_req ir
;
1312 struct hci_dev
*hdev
;
1313 int err
= 0, do_inquiry
= 0, max_rsp
;
1317 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
1320 hdev
= hci_dev_get(ir
.dev_id
);
1324 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1329 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1334 if (hdev
->dev_type
!= HCI_BREDR
) {
1339 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1345 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
1346 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
1347 hci_inquiry_cache_flush(hdev
);
1350 hci_dev_unlock(hdev
);
1352 timeo
= ir
.length
* msecs_to_jiffies(2000);
1355 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
1360 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1361 * cleared). If it is interrupted by a signal, return -EINTR.
1363 if (wait_on_bit(&hdev
->flags
, HCI_INQUIRY
,
1364 TASK_INTERRUPTIBLE
))
1368 /* for unlimited number of responses we will use buffer with
1371 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
1373 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1374 * copy it to the user space.
1376 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
1383 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
1384 hci_dev_unlock(hdev
);
1386 BT_DBG("num_rsp %d", ir
.num_rsp
);
1388 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
1390 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
1403 static int hci_dev_do_open(struct hci_dev
*hdev
)
1407 BT_DBG("%s %p", hdev
->name
, hdev
);
1411 if (hci_dev_test_flag(hdev
, HCI_UNREGISTER
)) {
1416 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
1417 !hci_dev_test_flag(hdev
, HCI_CONFIG
)) {
1418 /* Check for rfkill but allow the HCI setup stage to
1419 * proceed (which in itself doesn't cause any RF activity).
1421 if (hci_dev_test_flag(hdev
, HCI_RFKILLED
)) {
1426 /* Check for valid public address or a configured static
1427 * random adddress, but let the HCI setup proceed to
1428 * be able to determine if there is a public address
1431 * In case of user channel usage, it is not important
1432 * if a public address or static random address is
1435 * This check is only valid for BR/EDR controllers
1436 * since AMP controllers do not have an address.
1438 if (!hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1439 hdev
->dev_type
== HCI_BREDR
&&
1440 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
1441 !bacmp(&hdev
->static_addr
, BDADDR_ANY
)) {
1442 ret
= -EADDRNOTAVAIL
;
1447 if (test_bit(HCI_UP
, &hdev
->flags
)) {
1452 if (hdev
->open(hdev
)) {
1457 set_bit(HCI_RUNNING
, &hdev
->flags
);
1458 hci_notify(hdev
, HCI_DEV_OPEN
);
1460 atomic_set(&hdev
->cmd_cnt
, 1);
1461 set_bit(HCI_INIT
, &hdev
->flags
);
1463 if (hci_dev_test_flag(hdev
, HCI_SETUP
)) {
1464 hci_sock_dev_event(hdev
, HCI_DEV_SETUP
);
1467 ret
= hdev
->setup(hdev
);
1469 /* The transport driver can set these quirks before
1470 * creating the HCI device or in its setup callback.
1472 * In case any of them is set, the controller has to
1473 * start up as unconfigured.
1475 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
1476 test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
))
1477 hci_dev_set_flag(hdev
, HCI_UNCONFIGURED
);
1479 /* For an unconfigured controller it is required to
1480 * read at least the version information provided by
1481 * the Read Local Version Information command.
1483 * If the set_bdaddr driver callback is provided, then
1484 * also the original Bluetooth public device address
1485 * will be read using the Read BD Address command.
1487 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
1488 ret
= __hci_unconf_init(hdev
);
1491 if (hci_dev_test_flag(hdev
, HCI_CONFIG
)) {
1492 /* If public address change is configured, ensure that
1493 * the address gets programmed. If the driver does not
1494 * support changing the public address, fail the power
1497 if (bacmp(&hdev
->public_addr
, BDADDR_ANY
) &&
1499 ret
= hdev
->set_bdaddr(hdev
, &hdev
->public_addr
);
1501 ret
= -EADDRNOTAVAIL
;
1505 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1506 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
))
1507 ret
= __hci_init(hdev
);
1510 /* If the HCI Reset command is clearing all diagnostic settings,
1511 * then they need to be reprogrammed after the init procedure
1514 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG
, &hdev
->quirks
) &&
1515 hci_dev_test_flag(hdev
, HCI_VENDOR_DIAG
) && hdev
->set_diag
)
1516 ret
= hdev
->set_diag(hdev
, true);
1518 clear_bit(HCI_INIT
, &hdev
->flags
);
1522 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
1523 set_bit(HCI_UP
, &hdev
->flags
);
1524 hci_notify(hdev
, HCI_DEV_UP
);
1525 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
1526 !hci_dev_test_flag(hdev
, HCI_CONFIG
) &&
1527 !hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1528 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1529 hdev
->dev_type
== HCI_BREDR
) {
1531 mgmt_powered(hdev
, 1);
1532 hci_dev_unlock(hdev
);
1535 /* Init failed, cleanup */
1536 flush_work(&hdev
->tx_work
);
1537 flush_work(&hdev
->cmd_work
);
1538 flush_work(&hdev
->rx_work
);
1540 skb_queue_purge(&hdev
->cmd_q
);
1541 skb_queue_purge(&hdev
->rx_q
);
1546 if (hdev
->sent_cmd
) {
1547 kfree_skb(hdev
->sent_cmd
);
1548 hdev
->sent_cmd
= NULL
;
1551 clear_bit(HCI_RUNNING
, &hdev
->flags
);
1552 hci_notify(hdev
, HCI_DEV_CLOSE
);
1555 hdev
->flags
&= BIT(HCI_RAW
);
1559 hci_req_unlock(hdev
);
1563 /* ---- HCI ioctl helpers ---- */
1565 int hci_dev_open(__u16 dev
)
1567 struct hci_dev
*hdev
;
1570 hdev
= hci_dev_get(dev
);
1574 /* Devices that are marked as unconfigured can only be powered
1575 * up as user channel. Trying to bring them up as normal devices
1576 * will result into a failure. Only user channel operation is
1579 * When this function is called for a user channel, the flag
1580 * HCI_USER_CHANNEL will be set first before attempting to
1583 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1584 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1589 /* We need to ensure that no other power on/off work is pending
1590 * before proceeding to call hci_dev_do_open. This is
1591 * particularly important if the setup procedure has not yet
1594 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
))
1595 cancel_delayed_work(&hdev
->power_off
);
1597 /* After this call it is guaranteed that the setup procedure
1598 * has finished. This means that error conditions like RFKILL
1599 * or no valid public or static random address apply.
1601 flush_workqueue(hdev
->req_workqueue
);
1603 /* For controllers not using the management interface and that
1604 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1605 * so that pairing works for them. Once the management interface
1606 * is in use this bit will be cleared again and userspace has
1607 * to explicitly enable it.
1609 if (!hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1610 !hci_dev_test_flag(hdev
, HCI_MGMT
))
1611 hci_dev_set_flag(hdev
, HCI_BONDABLE
);
1613 err
= hci_dev_do_open(hdev
);
1620 /* This function requires the caller holds hdev->lock */
1621 static void hci_pend_le_actions_clear(struct hci_dev
*hdev
)
1623 struct hci_conn_params
*p
;
1625 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
1627 hci_conn_drop(p
->conn
);
1628 hci_conn_put(p
->conn
);
1631 list_del_init(&p
->action
);
1634 BT_DBG("All LE pending actions cleared");
1637 int hci_dev_do_close(struct hci_dev
*hdev
)
1641 BT_DBG("%s %p", hdev
->name
, hdev
);
1643 if (!hci_dev_test_flag(hdev
, HCI_UNREGISTER
) &&
1644 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1645 test_bit(HCI_UP
, &hdev
->flags
)) {
1646 /* Execute vendor specific shutdown routine */
1648 hdev
->shutdown(hdev
);
1651 cancel_delayed_work(&hdev
->power_off
);
1653 hci_req_cancel(hdev
, ENODEV
);
1656 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
1657 cancel_delayed_work_sync(&hdev
->cmd_timer
);
1658 hci_req_unlock(hdev
);
1662 /* Flush RX and TX works */
1663 flush_work(&hdev
->tx_work
);
1664 flush_work(&hdev
->rx_work
);
1666 if (hdev
->discov_timeout
> 0) {
1667 cancel_delayed_work(&hdev
->discov_off
);
1668 hdev
->discov_timeout
= 0;
1669 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1670 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1673 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
1674 cancel_delayed_work(&hdev
->service_cache
);
1676 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
1677 cancel_delayed_work_sync(&hdev
->le_scan_restart
);
1679 if (hci_dev_test_flag(hdev
, HCI_MGMT
))
1680 cancel_delayed_work_sync(&hdev
->rpa_expired
);
1682 if (hdev
->adv_instance_timeout
) {
1683 cancel_delayed_work_sync(&hdev
->adv_instance_expire
);
1684 hdev
->adv_instance_timeout
= 0;
1687 /* Avoid potential lockdep warnings from the *_flush() calls by
1688 * ensuring the workqueue is empty up front.
1690 drain_workqueue(hdev
->workqueue
);
1694 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
1696 auto_off
= hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
);
1698 if (!auto_off
&& hdev
->dev_type
== HCI_BREDR
)
1699 mgmt_powered(hdev
, 0);
1701 hci_inquiry_cache_flush(hdev
);
1702 hci_pend_le_actions_clear(hdev
);
1703 hci_conn_hash_flush(hdev
);
1704 hci_dev_unlock(hdev
);
1706 smp_unregister(hdev
);
1708 hci_notify(hdev
, HCI_DEV_DOWN
);
1714 skb_queue_purge(&hdev
->cmd_q
);
1715 atomic_set(&hdev
->cmd_cnt
, 1);
1716 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
) &&
1717 !auto_off
&& !hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1718 set_bit(HCI_INIT
, &hdev
->flags
);
1719 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
1720 clear_bit(HCI_INIT
, &hdev
->flags
);
1723 /* flush cmd work */
1724 flush_work(&hdev
->cmd_work
);
1727 skb_queue_purge(&hdev
->rx_q
);
1728 skb_queue_purge(&hdev
->cmd_q
);
1729 skb_queue_purge(&hdev
->raw_q
);
1731 /* Drop last sent command */
1732 if (hdev
->sent_cmd
) {
1733 cancel_delayed_work_sync(&hdev
->cmd_timer
);
1734 kfree_skb(hdev
->sent_cmd
);
1735 hdev
->sent_cmd
= NULL
;
1738 clear_bit(HCI_RUNNING
, &hdev
->flags
);
1739 hci_notify(hdev
, HCI_DEV_CLOSE
);
1741 /* After this point our queues are empty
1742 * and no tasks are scheduled. */
1746 hdev
->flags
&= BIT(HCI_RAW
);
1747 hci_dev_clear_volatile_flags(hdev
);
1749 /* Controller radio is available but is currently powered down */
1750 hdev
->amp_status
= AMP_STATUS_POWERED_DOWN
;
1752 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1753 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
1754 bacpy(&hdev
->random_addr
, BDADDR_ANY
);
1756 hci_req_unlock(hdev
);
1762 int hci_dev_close(__u16 dev
)
1764 struct hci_dev
*hdev
;
1767 hdev
= hci_dev_get(dev
);
1771 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1776 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
))
1777 cancel_delayed_work(&hdev
->power_off
);
1779 err
= hci_dev_do_close(hdev
);
1786 static int hci_dev_do_reset(struct hci_dev
*hdev
)
1790 BT_DBG("%s %p", hdev
->name
, hdev
);
1795 skb_queue_purge(&hdev
->rx_q
);
1796 skb_queue_purge(&hdev
->cmd_q
);
1798 /* Avoid potential lockdep warnings from the *_flush() calls by
1799 * ensuring the workqueue is empty up front.
1801 drain_workqueue(hdev
->workqueue
);
1804 hci_inquiry_cache_flush(hdev
);
1805 hci_conn_hash_flush(hdev
);
1806 hci_dev_unlock(hdev
);
1811 atomic_set(&hdev
->cmd_cnt
, 1);
1812 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
1814 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
1816 hci_req_unlock(hdev
);
1820 int hci_dev_reset(__u16 dev
)
1822 struct hci_dev
*hdev
;
1825 hdev
= hci_dev_get(dev
);
1829 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
1834 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1839 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1844 err
= hci_dev_do_reset(hdev
);
1851 int hci_dev_reset_stat(__u16 dev
)
1853 struct hci_dev
*hdev
;
1856 hdev
= hci_dev_get(dev
);
1860 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1865 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1870 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1877 static void hci_update_scan_state(struct hci_dev
*hdev
, u8 scan
)
1879 bool conn_changed
, discov_changed
;
1881 BT_DBG("%s scan 0x%02x", hdev
->name
, scan
);
1883 if ((scan
& SCAN_PAGE
))
1884 conn_changed
= !hci_dev_test_and_set_flag(hdev
,
1887 conn_changed
= hci_dev_test_and_clear_flag(hdev
,
1890 if ((scan
& SCAN_INQUIRY
)) {
1891 discov_changed
= !hci_dev_test_and_set_flag(hdev
,
1894 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1895 discov_changed
= hci_dev_test_and_clear_flag(hdev
,
1899 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
1902 if (conn_changed
|| discov_changed
) {
1903 /* In case this was disabled through mgmt */
1904 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
1906 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1907 mgmt_update_adv_data(hdev
);
1909 mgmt_new_settings(hdev
);
1913 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
1915 struct hci_dev
*hdev
;
1916 struct hci_dev_req dr
;
1919 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
1922 hdev
= hci_dev_get(dr
.dev_id
);
1926 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1931 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1936 if (hdev
->dev_type
!= HCI_BREDR
) {
1941 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1948 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1953 if (!lmp_encrypt_capable(hdev
)) {
1958 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
1959 /* Auth must be enabled first */
1960 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1966 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
1971 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
1974 /* Ensure that the connectable and discoverable states
1975 * get correctly modified as this was a non-mgmt change.
1978 hci_update_scan_state(hdev
, dr
.dev_opt
);
1982 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
1986 case HCISETLINKMODE
:
1987 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
1988 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
1992 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
1996 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1997 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2001 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2002 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2015 int hci_get_dev_list(void __user
*arg
)
2017 struct hci_dev
*hdev
;
2018 struct hci_dev_list_req
*dl
;
2019 struct hci_dev_req
*dr
;
2020 int n
= 0, size
, err
;
2023 if (get_user(dev_num
, (__u16 __user
*) arg
))
2026 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
2029 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
2031 dl
= kzalloc(size
, GFP_KERNEL
);
2037 read_lock(&hci_dev_list_lock
);
2038 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
2039 unsigned long flags
= hdev
->flags
;
2041 /* When the auto-off is configured it means the transport
2042 * is running, but in that case still indicate that the
2043 * device is actually down.
2045 if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
))
2046 flags
&= ~BIT(HCI_UP
);
2048 (dr
+ n
)->dev_id
= hdev
->id
;
2049 (dr
+ n
)->dev_opt
= flags
;
2054 read_unlock(&hci_dev_list_lock
);
2057 size
= sizeof(*dl
) + n
* sizeof(*dr
);
2059 err
= copy_to_user(arg
, dl
, size
);
2062 return err
? -EFAULT
: 0;
2065 int hci_get_dev_info(void __user
*arg
)
2067 struct hci_dev
*hdev
;
2068 struct hci_dev_info di
;
2069 unsigned long flags
;
2072 if (copy_from_user(&di
, arg
, sizeof(di
)))
2075 hdev
= hci_dev_get(di
.dev_id
);
2079 /* When the auto-off is configured it means the transport
2080 * is running, but in that case still indicate that the
2081 * device is actually down.
2083 if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
))
2084 flags
= hdev
->flags
& ~BIT(HCI_UP
);
2086 flags
= hdev
->flags
;
2088 strcpy(di
.name
, hdev
->name
);
2089 di
.bdaddr
= hdev
->bdaddr
;
2090 di
.type
= (hdev
->bus
& 0x0f) | ((hdev
->dev_type
& 0x03) << 4);
2092 di
.pkt_type
= hdev
->pkt_type
;
2093 if (lmp_bredr_capable(hdev
)) {
2094 di
.acl_mtu
= hdev
->acl_mtu
;
2095 di
.acl_pkts
= hdev
->acl_pkts
;
2096 di
.sco_mtu
= hdev
->sco_mtu
;
2097 di
.sco_pkts
= hdev
->sco_pkts
;
2099 di
.acl_mtu
= hdev
->le_mtu
;
2100 di
.acl_pkts
= hdev
->le_pkts
;
2104 di
.link_policy
= hdev
->link_policy
;
2105 di
.link_mode
= hdev
->link_mode
;
2107 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
2108 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
2110 if (copy_to_user(arg
, &di
, sizeof(di
)))
2118 /* ---- Interface to HCI drivers ---- */
2120 static int hci_rfkill_set_block(void *data
, bool blocked
)
2122 struct hci_dev
*hdev
= data
;
2124 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
2126 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
))
2130 hci_dev_set_flag(hdev
, HCI_RFKILLED
);
2131 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
2132 !hci_dev_test_flag(hdev
, HCI_CONFIG
))
2133 hci_dev_do_close(hdev
);
2135 hci_dev_clear_flag(hdev
, HCI_RFKILLED
);
2141 static const struct rfkill_ops hci_rfkill_ops
= {
2142 .set_block
= hci_rfkill_set_block
,
2145 static void hci_power_on(struct work_struct
*work
)
2147 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
2150 BT_DBG("%s", hdev
->name
);
2152 err
= hci_dev_do_open(hdev
);
2155 mgmt_set_powered_failed(hdev
, err
);
2156 hci_dev_unlock(hdev
);
2160 /* During the HCI setup phase, a few error conditions are
2161 * ignored and they need to be checked now. If they are still
2162 * valid, it is important to turn the device back off.
2164 if (hci_dev_test_flag(hdev
, HCI_RFKILLED
) ||
2165 hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) ||
2166 (hdev
->dev_type
== HCI_BREDR
&&
2167 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2168 !bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
2169 hci_dev_clear_flag(hdev
, HCI_AUTO_OFF
);
2170 hci_dev_do_close(hdev
);
2171 } else if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
)) {
2172 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
2173 HCI_AUTO_OFF_TIMEOUT
);
2176 if (hci_dev_test_and_clear_flag(hdev
, HCI_SETUP
)) {
2177 /* For unconfigured devices, set the HCI_RAW flag
2178 * so that userspace can easily identify them.
2180 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
2181 set_bit(HCI_RAW
, &hdev
->flags
);
2183 /* For fully configured devices, this will send
2184 * the Index Added event. For unconfigured devices,
2185 * it will send Unconfigued Index Added event.
2187 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2188 * and no event will be send.
2190 mgmt_index_added(hdev
);
2191 } else if (hci_dev_test_and_clear_flag(hdev
, HCI_CONFIG
)) {
2192 /* When the controller is now configured, then it
2193 * is important to clear the HCI_RAW flag.
2195 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
2196 clear_bit(HCI_RAW
, &hdev
->flags
);
2198 /* Powering on the controller with HCI_CONFIG set only
2199 * happens with the transition from unconfigured to
2200 * configured. This will send the Index Added event.
2202 mgmt_index_added(hdev
);
2206 static void hci_power_off(struct work_struct
*work
)
2208 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2211 BT_DBG("%s", hdev
->name
);
2213 hci_dev_do_close(hdev
);
2216 static void hci_error_reset(struct work_struct
*work
)
2218 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, error_reset
);
2220 BT_DBG("%s", hdev
->name
);
2223 hdev
->hw_error(hdev
, hdev
->hw_error_code
);
2225 BT_ERR("%s hardware error 0x%2.2x", hdev
->name
,
2226 hdev
->hw_error_code
);
2228 if (hci_dev_do_close(hdev
))
2231 hci_dev_do_open(hdev
);
2234 static void hci_discov_off(struct work_struct
*work
)
2236 struct hci_dev
*hdev
;
2238 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
2240 BT_DBG("%s", hdev
->name
);
2242 mgmt_discoverable_timeout(hdev
);
2245 static void hci_adv_timeout_expire(struct work_struct
*work
)
2247 struct hci_dev
*hdev
;
2249 hdev
= container_of(work
, struct hci_dev
, adv_instance_expire
.work
);
2251 BT_DBG("%s", hdev
->name
);
2253 mgmt_adv_timeout_expired(hdev
);
2256 void hci_uuids_clear(struct hci_dev
*hdev
)
2258 struct bt_uuid
*uuid
, *tmp
;
2260 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
2261 list_del(&uuid
->list
);
2266 void hci_link_keys_clear(struct hci_dev
*hdev
)
2268 struct link_key
*key
;
2270 list_for_each_entry_rcu(key
, &hdev
->link_keys
, list
) {
2271 list_del_rcu(&key
->list
);
2272 kfree_rcu(key
, rcu
);
2276 void hci_smp_ltks_clear(struct hci_dev
*hdev
)
2280 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2281 list_del_rcu(&k
->list
);
2286 void hci_smp_irks_clear(struct hci_dev
*hdev
)
2290 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
2291 list_del_rcu(&k
->list
);
2296 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2301 list_for_each_entry_rcu(k
, &hdev
->link_keys
, list
) {
2302 if (bacmp(bdaddr
, &k
->bdaddr
) == 0) {
2312 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2313 u8 key_type
, u8 old_key_type
)
2316 if (key_type
< 0x03)
2319 /* Debug keys are insecure so don't store them persistently */
2320 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
2323 /* Changed combination key and there's no previous one */
2324 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
2327 /* Security mode 3 case */
2331 /* BR/EDR key derived using SC from an LE link */
2332 if (conn
->type
== LE_LINK
)
2335 /* Neither local nor remote side had no-bonding as requirement */
2336 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
2339 /* Local side had dedicated bonding as requirement */
2340 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
2343 /* Remote side had dedicated bonding as requirement */
2344 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
2347 /* If none of the above criteria match, then don't store the key
2352 static u8
ltk_role(u8 type
)
2354 if (type
== SMP_LTK
)
2355 return HCI_ROLE_MASTER
;
2357 return HCI_ROLE_SLAVE
;
2360 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2361 u8 addr_type
, u8 role
)
2366 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2367 if (addr_type
!= k
->bdaddr_type
|| bacmp(bdaddr
, &k
->bdaddr
))
2370 if (smp_ltk_is_sc(k
) || ltk_role(k
->type
) == role
) {
2380 struct smp_irk
*hci_find_irk_by_rpa(struct hci_dev
*hdev
, bdaddr_t
*rpa
)
2382 struct smp_irk
*irk
;
2385 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2386 if (!bacmp(&irk
->rpa
, rpa
)) {
2392 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2393 if (smp_irk_matches(hdev
, irk
->val
, rpa
)) {
2394 bacpy(&irk
->rpa
, rpa
);
2404 struct smp_irk
*hci_find_irk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2407 struct smp_irk
*irk
;
2409 /* Identity Address must be public or static random */
2410 if (addr_type
== ADDR_LE_DEV_RANDOM
&& (bdaddr
->b
[5] & 0xc0) != 0xc0)
2414 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2415 if (addr_type
== irk
->addr_type
&&
2416 bacmp(bdaddr
, &irk
->bdaddr
) == 0) {
2426 struct link_key
*hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2427 bdaddr_t
*bdaddr
, u8
*val
, u8 type
,
2428 u8 pin_len
, bool *persistent
)
2430 struct link_key
*key
, *old_key
;
2433 old_key
= hci_find_link_key(hdev
, bdaddr
);
2435 old_key_type
= old_key
->type
;
2438 old_key_type
= conn
? conn
->key_type
: 0xff;
2439 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2442 list_add_rcu(&key
->list
, &hdev
->link_keys
);
2445 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
2447 /* Some buggy controller combinations generate a changed
2448 * combination key for legacy pairing even when there's no
2450 if (type
== HCI_LK_CHANGED_COMBINATION
&&
2451 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
2452 type
= HCI_LK_COMBINATION
;
2454 conn
->key_type
= type
;
2457 bacpy(&key
->bdaddr
, bdaddr
);
2458 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
2459 key
->pin_len
= pin_len
;
2461 if (type
== HCI_LK_CHANGED_COMBINATION
)
2462 key
->type
= old_key_type
;
2467 *persistent
= hci_persistent_key(hdev
, conn
, type
,
2473 struct smp_ltk
*hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2474 u8 addr_type
, u8 type
, u8 authenticated
,
2475 u8 tk
[16], u8 enc_size
, __le16 ediv
, __le64 rand
)
2477 struct smp_ltk
*key
, *old_key
;
2478 u8 role
= ltk_role(type
);
2480 old_key
= hci_find_ltk(hdev
, bdaddr
, addr_type
, role
);
2484 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2487 list_add_rcu(&key
->list
, &hdev
->long_term_keys
);
2490 bacpy(&key
->bdaddr
, bdaddr
);
2491 key
->bdaddr_type
= addr_type
;
2492 memcpy(key
->val
, tk
, sizeof(key
->val
));
2493 key
->authenticated
= authenticated
;
2496 key
->enc_size
= enc_size
;
2502 struct smp_irk
*hci_add_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2503 u8 addr_type
, u8 val
[16], bdaddr_t
*rpa
)
2505 struct smp_irk
*irk
;
2507 irk
= hci_find_irk_by_addr(hdev
, bdaddr
, addr_type
);
2509 irk
= kzalloc(sizeof(*irk
), GFP_KERNEL
);
2513 bacpy(&irk
->bdaddr
, bdaddr
);
2514 irk
->addr_type
= addr_type
;
2516 list_add_rcu(&irk
->list
, &hdev
->identity_resolving_keys
);
2519 memcpy(irk
->val
, val
, 16);
2520 bacpy(&irk
->rpa
, rpa
);
2525 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2527 struct link_key
*key
;
2529 key
= hci_find_link_key(hdev
, bdaddr
);
2533 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2535 list_del_rcu(&key
->list
);
2536 kfree_rcu(key
, rcu
);
2541 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 bdaddr_type
)
2546 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2547 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->bdaddr_type
!= bdaddr_type
)
2550 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2552 list_del_rcu(&k
->list
);
2557 return removed
? 0 : -ENOENT
;
2560 void hci_remove_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
)
2564 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
2565 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->addr_type
!= addr_type
)
2568 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2570 list_del_rcu(&k
->list
);
2575 bool hci_bdaddr_is_paired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
2578 struct smp_irk
*irk
;
2581 if (type
== BDADDR_BREDR
) {
2582 if (hci_find_link_key(hdev
, bdaddr
))
2587 /* Convert to HCI addr type which struct smp_ltk uses */
2588 if (type
== BDADDR_LE_PUBLIC
)
2589 addr_type
= ADDR_LE_DEV_PUBLIC
;
2591 addr_type
= ADDR_LE_DEV_RANDOM
;
2593 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
2595 bdaddr
= &irk
->bdaddr
;
2596 addr_type
= irk
->addr_type
;
2600 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2601 if (k
->bdaddr_type
== addr_type
&& !bacmp(bdaddr
, &k
->bdaddr
)) {
2611 /* HCI command timer function */
2612 static void hci_cmd_timeout(struct work_struct
*work
)
2614 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2617 if (hdev
->sent_cmd
) {
2618 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
2619 u16 opcode
= __le16_to_cpu(sent
->opcode
);
2621 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
2623 BT_ERR("%s command tx timeout", hdev
->name
);
2626 atomic_set(&hdev
->cmd_cnt
, 1);
2627 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2630 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
2631 bdaddr_t
*bdaddr
, u8 bdaddr_type
)
2633 struct oob_data
*data
;
2635 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
) {
2636 if (bacmp(bdaddr
, &data
->bdaddr
) != 0)
2638 if (data
->bdaddr_type
!= bdaddr_type
)
2646 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2649 struct oob_data
*data
;
2651 data
= hci_find_remote_oob_data(hdev
, bdaddr
, bdaddr_type
);
2655 BT_DBG("%s removing %pMR (%u)", hdev
->name
, bdaddr
, bdaddr_type
);
2657 list_del(&data
->list
);
2663 void hci_remote_oob_data_clear(struct hci_dev
*hdev
)
2665 struct oob_data
*data
, *n
;
2667 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
2668 list_del(&data
->list
);
2673 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2674 u8 bdaddr_type
, u8
*hash192
, u8
*rand192
,
2675 u8
*hash256
, u8
*rand256
)
2677 struct oob_data
*data
;
2679 data
= hci_find_remote_oob_data(hdev
, bdaddr
, bdaddr_type
);
2681 data
= kmalloc(sizeof(*data
), GFP_KERNEL
);
2685 bacpy(&data
->bdaddr
, bdaddr
);
2686 data
->bdaddr_type
= bdaddr_type
;
2687 list_add(&data
->list
, &hdev
->remote_oob_data
);
2690 if (hash192
&& rand192
) {
2691 memcpy(data
->hash192
, hash192
, sizeof(data
->hash192
));
2692 memcpy(data
->rand192
, rand192
, sizeof(data
->rand192
));
2693 if (hash256
&& rand256
)
2694 data
->present
= 0x03;
2696 memset(data
->hash192
, 0, sizeof(data
->hash192
));
2697 memset(data
->rand192
, 0, sizeof(data
->rand192
));
2698 if (hash256
&& rand256
)
2699 data
->present
= 0x02;
2701 data
->present
= 0x00;
2704 if (hash256
&& rand256
) {
2705 memcpy(data
->hash256
, hash256
, sizeof(data
->hash256
));
2706 memcpy(data
->rand256
, rand256
, sizeof(data
->rand256
));
2708 memset(data
->hash256
, 0, sizeof(data
->hash256
));
2709 memset(data
->rand256
, 0, sizeof(data
->rand256
));
2710 if (hash192
&& rand192
)
2711 data
->present
= 0x01;
2714 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
2719 /* This function requires the caller holds hdev->lock */
2720 struct adv_info
*hci_find_adv_instance(struct hci_dev
*hdev
, u8 instance
)
2722 struct adv_info
*adv_instance
;
2724 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
2725 if (adv_instance
->instance
== instance
)
2726 return adv_instance
;
2732 /* This function requires the caller holds hdev->lock */
2733 struct adv_info
*hci_get_next_instance(struct hci_dev
*hdev
, u8 instance
) {
2734 struct adv_info
*cur_instance
;
2736 cur_instance
= hci_find_adv_instance(hdev
, instance
);
2740 if (cur_instance
== list_last_entry(&hdev
->adv_instances
,
2741 struct adv_info
, list
))
2742 return list_first_entry(&hdev
->adv_instances
,
2743 struct adv_info
, list
);
2745 return list_next_entry(cur_instance
, list
);
2748 /* This function requires the caller holds hdev->lock */
2749 int hci_remove_adv_instance(struct hci_dev
*hdev
, u8 instance
)
2751 struct adv_info
*adv_instance
;
2753 adv_instance
= hci_find_adv_instance(hdev
, instance
);
2757 BT_DBG("%s removing %dMR", hdev
->name
, instance
);
2759 if (hdev
->cur_adv_instance
== instance
&& hdev
->adv_instance_timeout
) {
2760 cancel_delayed_work(&hdev
->adv_instance_expire
);
2761 hdev
->adv_instance_timeout
= 0;
2764 list_del(&adv_instance
->list
);
2765 kfree(adv_instance
);
2767 hdev
->adv_instance_cnt
--;
2772 /* This function requires the caller holds hdev->lock */
2773 void hci_adv_instances_clear(struct hci_dev
*hdev
)
2775 struct adv_info
*adv_instance
, *n
;
2777 if (hdev
->adv_instance_timeout
) {
2778 cancel_delayed_work(&hdev
->adv_instance_expire
);
2779 hdev
->adv_instance_timeout
= 0;
2782 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
2783 list_del(&adv_instance
->list
);
2784 kfree(adv_instance
);
2787 hdev
->adv_instance_cnt
= 0;
2790 /* This function requires the caller holds hdev->lock */
2791 int hci_add_adv_instance(struct hci_dev
*hdev
, u8 instance
, u32 flags
,
2792 u16 adv_data_len
, u8
*adv_data
,
2793 u16 scan_rsp_len
, u8
*scan_rsp_data
,
2794 u16 timeout
, u16 duration
)
2796 struct adv_info
*adv_instance
;
2798 adv_instance
= hci_find_adv_instance(hdev
, instance
);
2800 memset(adv_instance
->adv_data
, 0,
2801 sizeof(adv_instance
->adv_data
));
2802 memset(adv_instance
->scan_rsp_data
, 0,
2803 sizeof(adv_instance
->scan_rsp_data
));
2805 if (hdev
->adv_instance_cnt
>= HCI_MAX_ADV_INSTANCES
||
2806 instance
< 1 || instance
> HCI_MAX_ADV_INSTANCES
)
2809 adv_instance
= kzalloc(sizeof(*adv_instance
), GFP_KERNEL
);
2813 adv_instance
->pending
= true;
2814 adv_instance
->instance
= instance
;
2815 list_add(&adv_instance
->list
, &hdev
->adv_instances
);
2816 hdev
->adv_instance_cnt
++;
2819 adv_instance
->flags
= flags
;
2820 adv_instance
->adv_data_len
= adv_data_len
;
2821 adv_instance
->scan_rsp_len
= scan_rsp_len
;
2824 memcpy(adv_instance
->adv_data
, adv_data
, adv_data_len
);
2827 memcpy(adv_instance
->scan_rsp_data
,
2828 scan_rsp_data
, scan_rsp_len
);
2830 adv_instance
->timeout
= timeout
;
2831 adv_instance
->remaining_time
= timeout
;
2834 adv_instance
->duration
= HCI_DEFAULT_ADV_DURATION
;
2836 adv_instance
->duration
= duration
;
2838 BT_DBG("%s for %dMR", hdev
->name
, instance
);
2843 struct bdaddr_list
*hci_bdaddr_list_lookup(struct list_head
*bdaddr_list
,
2844 bdaddr_t
*bdaddr
, u8 type
)
2846 struct bdaddr_list
*b
;
2848 list_for_each_entry(b
, bdaddr_list
, list
) {
2849 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
2856 void hci_bdaddr_list_clear(struct list_head
*bdaddr_list
)
2858 struct list_head
*p
, *n
;
2860 list_for_each_safe(p
, n
, bdaddr_list
) {
2861 struct bdaddr_list
*b
= list_entry(p
, struct bdaddr_list
, list
);
2868 int hci_bdaddr_list_add(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2870 struct bdaddr_list
*entry
;
2872 if (!bacmp(bdaddr
, BDADDR_ANY
))
2875 if (hci_bdaddr_list_lookup(list
, bdaddr
, type
))
2878 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
2882 bacpy(&entry
->bdaddr
, bdaddr
);
2883 entry
->bdaddr_type
= type
;
2885 list_add(&entry
->list
, list
);
2890 int hci_bdaddr_list_del(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2892 struct bdaddr_list
*entry
;
2894 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
2895 hci_bdaddr_list_clear(list
);
2899 entry
= hci_bdaddr_list_lookup(list
, bdaddr
, type
);
2903 list_del(&entry
->list
);
2909 /* This function requires the caller holds hdev->lock */
2910 struct hci_conn_params
*hci_conn_params_lookup(struct hci_dev
*hdev
,
2911 bdaddr_t
*addr
, u8 addr_type
)
2913 struct hci_conn_params
*params
;
2915 list_for_each_entry(params
, &hdev
->le_conn_params
, list
) {
2916 if (bacmp(¶ms
->addr
, addr
) == 0 &&
2917 params
->addr_type
== addr_type
) {
2925 /* This function requires the caller holds hdev->lock */
2926 struct hci_conn_params
*hci_pend_le_action_lookup(struct list_head
*list
,
2927 bdaddr_t
*addr
, u8 addr_type
)
2929 struct hci_conn_params
*param
;
2931 list_for_each_entry(param
, list
, action
) {
2932 if (bacmp(¶m
->addr
, addr
) == 0 &&
2933 param
->addr_type
== addr_type
)
2940 /* This function requires the caller holds hdev->lock */
2941 struct hci_conn_params
*hci_explicit_connect_lookup(struct hci_dev
*hdev
,
2945 struct hci_conn_params
*param
;
2947 list_for_each_entry(param
, &hdev
->pend_le_conns
, action
) {
2948 if (bacmp(¶m
->addr
, addr
) == 0 &&
2949 param
->addr_type
== addr_type
&&
2950 param
->explicit_connect
)
2957 /* This function requires the caller holds hdev->lock */
2958 struct hci_conn_params
*hci_conn_params_add(struct hci_dev
*hdev
,
2959 bdaddr_t
*addr
, u8 addr_type
)
2961 struct hci_conn_params
*params
;
2963 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
2967 params
= kzalloc(sizeof(*params
), GFP_KERNEL
);
2969 BT_ERR("Out of memory");
2973 bacpy(¶ms
->addr
, addr
);
2974 params
->addr_type
= addr_type
;
2976 list_add(¶ms
->list
, &hdev
->le_conn_params
);
2977 INIT_LIST_HEAD(¶ms
->action
);
2979 params
->conn_min_interval
= hdev
->le_conn_min_interval
;
2980 params
->conn_max_interval
= hdev
->le_conn_max_interval
;
2981 params
->conn_latency
= hdev
->le_conn_latency
;
2982 params
->supervision_timeout
= hdev
->le_supv_timeout
;
2983 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2985 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
2990 static void hci_conn_params_free(struct hci_conn_params
*params
)
2993 hci_conn_drop(params
->conn
);
2994 hci_conn_put(params
->conn
);
2997 list_del(¶ms
->action
);
2998 list_del(¶ms
->list
);
3002 /* This function requires the caller holds hdev->lock */
3003 void hci_conn_params_del(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 addr_type
)
3005 struct hci_conn_params
*params
;
3007 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
3011 hci_conn_params_free(params
);
3013 hci_update_background_scan(hdev
);
3015 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
3018 /* This function requires the caller holds hdev->lock */
3019 void hci_conn_params_clear_disabled(struct hci_dev
*hdev
)
3021 struct hci_conn_params
*params
, *tmp
;
3023 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
) {
3024 if (params
->auto_connect
!= HCI_AUTO_CONN_DISABLED
)
3027 /* If trying to estabilish one time connection to disabled
3028 * device, leave the params, but mark them as just once.
3030 if (params
->explicit_connect
) {
3031 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
3035 list_del(¶ms
->list
);
3039 BT_DBG("All LE disabled connection parameters were removed");
3042 /* This function requires the caller holds hdev->lock */
3043 void hci_conn_params_clear_all(struct hci_dev
*hdev
)
3045 struct hci_conn_params
*params
, *tmp
;
3047 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
)
3048 hci_conn_params_free(params
);
3050 hci_update_background_scan(hdev
);
3052 BT_DBG("All LE connection parameters were removed");
3055 static void inquiry_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3058 BT_ERR("Failed to start inquiry: status %d", status
);
3061 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3062 hci_dev_unlock(hdev
);
3067 static void le_scan_disable_work_complete(struct hci_dev
*hdev
, u8 status
,
3070 /* General inquiry access code (GIAC) */
3071 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3072 struct hci_cp_inquiry cp
;
3076 BT_ERR("Failed to disable LE scanning: status %d", status
);
3080 hdev
->discovery
.scan_start
= 0;
3082 switch (hdev
->discovery
.type
) {
3083 case DISCOV_TYPE_LE
:
3085 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3086 hci_dev_unlock(hdev
);
3089 case DISCOV_TYPE_INTERLEAVED
:
3092 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY
,
3094 /* If we were running LE only scan, change discovery
3095 * state. If we were running both LE and BR/EDR inquiry
3096 * simultaneously, and BR/EDR inquiry is already
3097 * finished, stop discovery, otherwise BR/EDR inquiry
3098 * will stop discovery when finished. If we will resolve
3099 * remote device name, do not change discovery state.
3101 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
) &&
3102 hdev
->discovery
.state
!= DISCOVERY_RESOLVING
)
3103 hci_discovery_set_state(hdev
,
3106 struct hci_request req
;
3108 hci_inquiry_cache_flush(hdev
);
3110 hci_req_init(&req
, hdev
);
3112 memset(&cp
, 0, sizeof(cp
));
3113 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
3114 cp
.length
= DISCOV_INTERLEAVED_INQUIRY_LEN
;
3115 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
3117 err
= hci_req_run(&req
, inquiry_complete
);
3119 BT_ERR("Inquiry request failed: err %d", err
);
3120 hci_discovery_set_state(hdev
,
3125 hci_dev_unlock(hdev
);
3130 static void le_scan_disable_work(struct work_struct
*work
)
3132 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3133 le_scan_disable
.work
);
3134 struct hci_request req
;
3137 BT_DBG("%s", hdev
->name
);
3139 cancel_delayed_work_sync(&hdev
->le_scan_restart
);
3141 hci_req_init(&req
, hdev
);
3143 hci_req_add_le_scan_disable(&req
);
3145 err
= hci_req_run(&req
, le_scan_disable_work_complete
);
3147 BT_ERR("Disable LE scanning request failed: err %d", err
);
3150 static void le_scan_restart_work_complete(struct hci_dev
*hdev
, u8 status
,
3153 unsigned long timeout
, duration
, scan_start
, now
;
3155 BT_DBG("%s", hdev
->name
);
3158 BT_ERR("Failed to restart LE scan: status %d", status
);
3162 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
) ||
3163 !hdev
->discovery
.scan_start
)
3166 /* When the scan was started, hdev->le_scan_disable has been queued
3167 * after duration from scan_start. During scan restart this job
3168 * has been canceled, and we need to queue it again after proper
3169 * timeout, to make sure that scan does not run indefinitely.
3171 duration
= hdev
->discovery
.scan_duration
;
3172 scan_start
= hdev
->discovery
.scan_start
;
3174 if (now
- scan_start
<= duration
) {
3177 if (now
>= scan_start
)
3178 elapsed
= now
- scan_start
;
3180 elapsed
= ULONG_MAX
- scan_start
+ now
;
3182 timeout
= duration
- elapsed
;
3186 queue_delayed_work(hdev
->workqueue
,
3187 &hdev
->le_scan_disable
, timeout
);
3190 static void le_scan_restart_work(struct work_struct
*work
)
3192 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3193 le_scan_restart
.work
);
3194 struct hci_request req
;
3195 struct hci_cp_le_set_scan_enable cp
;
3198 BT_DBG("%s", hdev
->name
);
3200 /* If controller is not scanning we are done. */
3201 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
3204 hci_req_init(&req
, hdev
);
3206 hci_req_add_le_scan_disable(&req
);
3208 memset(&cp
, 0, sizeof(cp
));
3209 cp
.enable
= LE_SCAN_ENABLE
;
3210 cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3211 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
3213 err
= hci_req_run(&req
, le_scan_restart_work_complete
);
3215 BT_ERR("Restart LE scan request failed: err %d", err
);
3218 /* Copy the Identity Address of the controller.
3220 * If the controller has a public BD_ADDR, then by default use that one.
3221 * If this is a LE only controller without a public address, default to
3222 * the static random address.
3224 * For debugging purposes it is possible to force controllers with a
3225 * public address to use the static random address instead.
3227 * In case BR/EDR has been disabled on a dual-mode controller and
3228 * userspace has configured a static address, then that address
3229 * becomes the identity address instead of the public BR/EDR address.
3231 void hci_copy_identity_address(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3234 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
3235 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
3236 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
3237 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
3238 bacpy(bdaddr
, &hdev
->static_addr
);
3239 *bdaddr_type
= ADDR_LE_DEV_RANDOM
;
3241 bacpy(bdaddr
, &hdev
->bdaddr
);
3242 *bdaddr_type
= ADDR_LE_DEV_PUBLIC
;
3246 /* Alloc HCI device */
3247 struct hci_dev
*hci_alloc_dev(void)
3249 struct hci_dev
*hdev
;
3251 hdev
= kzalloc(sizeof(*hdev
), GFP_KERNEL
);
3255 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
3256 hdev
->esco_type
= (ESCO_HV1
);
3257 hdev
->link_mode
= (HCI_LM_ACCEPT
);
3258 hdev
->num_iac
= 0x01; /* One IAC support is mandatory */
3259 hdev
->io_capability
= 0x03; /* No Input No Output */
3260 hdev
->manufacturer
= 0xffff; /* Default to internal use */
3261 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
3262 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
3263 hdev
->adv_instance_cnt
= 0;
3264 hdev
->cur_adv_instance
= 0x00;
3265 hdev
->adv_instance_timeout
= 0;
3267 hdev
->sniff_max_interval
= 800;
3268 hdev
->sniff_min_interval
= 80;
3270 hdev
->le_adv_channel_map
= 0x07;
3271 hdev
->le_adv_min_interval
= 0x0800;
3272 hdev
->le_adv_max_interval
= 0x0800;
3273 hdev
->le_scan_interval
= 0x0060;
3274 hdev
->le_scan_window
= 0x0030;
3275 hdev
->le_conn_min_interval
= 0x0028;
3276 hdev
->le_conn_max_interval
= 0x0038;
3277 hdev
->le_conn_latency
= 0x0000;
3278 hdev
->le_supv_timeout
= 0x002a;
3279 hdev
->le_def_tx_len
= 0x001b;
3280 hdev
->le_def_tx_time
= 0x0148;
3281 hdev
->le_max_tx_len
= 0x001b;
3282 hdev
->le_max_tx_time
= 0x0148;
3283 hdev
->le_max_rx_len
= 0x001b;
3284 hdev
->le_max_rx_time
= 0x0148;
3286 hdev
->rpa_timeout
= HCI_DEFAULT_RPA_TIMEOUT
;
3287 hdev
->discov_interleaved_timeout
= DISCOV_INTERLEAVED_TIMEOUT
;
3288 hdev
->conn_info_min_age
= DEFAULT_CONN_INFO_MIN_AGE
;
3289 hdev
->conn_info_max_age
= DEFAULT_CONN_INFO_MAX_AGE
;
3291 mutex_init(&hdev
->lock
);
3292 mutex_init(&hdev
->req_lock
);
3294 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
3295 INIT_LIST_HEAD(&hdev
->blacklist
);
3296 INIT_LIST_HEAD(&hdev
->whitelist
);
3297 INIT_LIST_HEAD(&hdev
->uuids
);
3298 INIT_LIST_HEAD(&hdev
->link_keys
);
3299 INIT_LIST_HEAD(&hdev
->long_term_keys
);
3300 INIT_LIST_HEAD(&hdev
->identity_resolving_keys
);
3301 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
3302 INIT_LIST_HEAD(&hdev
->le_white_list
);
3303 INIT_LIST_HEAD(&hdev
->le_conn_params
);
3304 INIT_LIST_HEAD(&hdev
->pend_le_conns
);
3305 INIT_LIST_HEAD(&hdev
->pend_le_reports
);
3306 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
3307 INIT_LIST_HEAD(&hdev
->adv_instances
);
3309 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
3310 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
3311 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
3312 INIT_WORK(&hdev
->power_on
, hci_power_on
);
3313 INIT_WORK(&hdev
->error_reset
, hci_error_reset
);
3315 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
3316 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
3317 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
3318 INIT_DELAYED_WORK(&hdev
->le_scan_restart
, le_scan_restart_work
);
3319 INIT_DELAYED_WORK(&hdev
->adv_instance_expire
, hci_adv_timeout_expire
);
3321 skb_queue_head_init(&hdev
->rx_q
);
3322 skb_queue_head_init(&hdev
->cmd_q
);
3323 skb_queue_head_init(&hdev
->raw_q
);
3325 init_waitqueue_head(&hdev
->req_wait_q
);
3327 INIT_DELAYED_WORK(&hdev
->cmd_timer
, hci_cmd_timeout
);
3329 hci_init_sysfs(hdev
);
3330 discovery_init(hdev
);
3334 EXPORT_SYMBOL(hci_alloc_dev
);
3336 /* Free HCI device */
3337 void hci_free_dev(struct hci_dev
*hdev
)
3339 /* will free via device release */
3340 put_device(&hdev
->dev
);
3342 EXPORT_SYMBOL(hci_free_dev
);
3344 /* Register HCI device */
3345 int hci_register_dev(struct hci_dev
*hdev
)
3349 if (!hdev
->open
|| !hdev
->close
|| !hdev
->send
)
3352 /* Do not allow HCI_AMP devices to register at index 0,
3353 * so the index can be used as the AMP controller ID.
3355 switch (hdev
->dev_type
) {
3357 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
3360 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
3369 sprintf(hdev
->name
, "hci%d", id
);
3372 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3374 hdev
->workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3375 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3376 if (!hdev
->workqueue
) {
3381 hdev
->req_workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3382 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3383 if (!hdev
->req_workqueue
) {
3384 destroy_workqueue(hdev
->workqueue
);
3389 if (!IS_ERR_OR_NULL(bt_debugfs
))
3390 hdev
->debugfs
= debugfs_create_dir(hdev
->name
, bt_debugfs
);
3392 dev_set_name(&hdev
->dev
, "%s", hdev
->name
);
3394 error
= device_add(&hdev
->dev
);
3398 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
3399 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
3402 if (rfkill_register(hdev
->rfkill
) < 0) {
3403 rfkill_destroy(hdev
->rfkill
);
3404 hdev
->rfkill
= NULL
;
3408 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
))
3409 hci_dev_set_flag(hdev
, HCI_RFKILLED
);
3411 hci_dev_set_flag(hdev
, HCI_SETUP
);
3412 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
3414 if (hdev
->dev_type
== HCI_BREDR
) {
3415 /* Assume BR/EDR support until proven otherwise (such as
3416 * through reading supported features during init.
3418 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
3421 write_lock(&hci_dev_list_lock
);
3422 list_add(&hdev
->list
, &hci_dev_list
);
3423 write_unlock(&hci_dev_list_lock
);
3425 /* Devices that are marked for raw-only usage are unconfigured
3426 * and should not be included in normal operation.
3428 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
3429 hci_dev_set_flag(hdev
, HCI_UNCONFIGURED
);
3431 hci_notify(hdev
, HCI_DEV_REG
);
3434 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
3439 destroy_workqueue(hdev
->workqueue
);
3440 destroy_workqueue(hdev
->req_workqueue
);
3442 ida_simple_remove(&hci_index_ida
, hdev
->id
);
3446 EXPORT_SYMBOL(hci_register_dev
);
3448 /* Unregister HCI device */
3449 void hci_unregister_dev(struct hci_dev
*hdev
)
3453 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3455 hci_dev_set_flag(hdev
, HCI_UNREGISTER
);
3459 write_lock(&hci_dev_list_lock
);
3460 list_del(&hdev
->list
);
3461 write_unlock(&hci_dev_list_lock
);
3463 hci_dev_do_close(hdev
);
3465 cancel_work_sync(&hdev
->power_on
);
3467 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
3468 !hci_dev_test_flag(hdev
, HCI_SETUP
) &&
3469 !hci_dev_test_flag(hdev
, HCI_CONFIG
)) {
3471 mgmt_index_removed(hdev
);
3472 hci_dev_unlock(hdev
);
3475 /* mgmt_index_removed should take care of emptying the
3477 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
3479 hci_notify(hdev
, HCI_DEV_UNREG
);
3482 rfkill_unregister(hdev
->rfkill
);
3483 rfkill_destroy(hdev
->rfkill
);
3486 device_del(&hdev
->dev
);
3488 debugfs_remove_recursive(hdev
->debugfs
);
3490 destroy_workqueue(hdev
->workqueue
);
3491 destroy_workqueue(hdev
->req_workqueue
);
3494 hci_bdaddr_list_clear(&hdev
->blacklist
);
3495 hci_bdaddr_list_clear(&hdev
->whitelist
);
3496 hci_uuids_clear(hdev
);
3497 hci_link_keys_clear(hdev
);
3498 hci_smp_ltks_clear(hdev
);
3499 hci_smp_irks_clear(hdev
);
3500 hci_remote_oob_data_clear(hdev
);
3501 hci_adv_instances_clear(hdev
);
3502 hci_bdaddr_list_clear(&hdev
->le_white_list
);
3503 hci_conn_params_clear_all(hdev
);
3504 hci_discovery_filter_clear(hdev
);
3505 hci_dev_unlock(hdev
);
3509 ida_simple_remove(&hci_index_ida
, id
);
3511 EXPORT_SYMBOL(hci_unregister_dev
);
3513 /* Suspend HCI device */
3514 int hci_suspend_dev(struct hci_dev
*hdev
)
3516 hci_notify(hdev
, HCI_DEV_SUSPEND
);
3519 EXPORT_SYMBOL(hci_suspend_dev
);
3521 /* Resume HCI device */
3522 int hci_resume_dev(struct hci_dev
*hdev
)
3524 hci_notify(hdev
, HCI_DEV_RESUME
);
3527 EXPORT_SYMBOL(hci_resume_dev
);
3529 /* Reset HCI device */
3530 int hci_reset_dev(struct hci_dev
*hdev
)
3532 const u8 hw_err
[] = { HCI_EV_HARDWARE_ERROR
, 0x01, 0x00 };
3533 struct sk_buff
*skb
;
3535 skb
= bt_skb_alloc(3, GFP_ATOMIC
);
3539 bt_cb(skb
)->pkt_type
= HCI_EVENT_PKT
;
3540 memcpy(skb_put(skb
, 3), hw_err
, 3);
3542 /* Send Hardware Error to upper stack */
3543 return hci_recv_frame(hdev
, skb
);
3545 EXPORT_SYMBOL(hci_reset_dev
);
3547 /* Receive frame from HCI drivers */
3548 int hci_recv_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3550 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
3551 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
3556 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
&&
3557 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
3558 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
) {
3564 bt_cb(skb
)->incoming
= 1;
3567 __net_timestamp(skb
);
3569 skb_queue_tail(&hdev
->rx_q
, skb
);
3570 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
3574 EXPORT_SYMBOL(hci_recv_frame
);
3576 /* Receive diagnostic message from HCI drivers */
3577 int hci_recv_diag(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3579 /* Mark as diagnostic packet */
3580 bt_cb(skb
)->pkt_type
= HCI_DIAG_PKT
;
3583 __net_timestamp(skb
);
3585 skb_queue_tail(&hdev
->rx_q
, skb
);
3586 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
3590 EXPORT_SYMBOL(hci_recv_diag
);
3592 /* ---- Interface to upper protocols ---- */
3594 int hci_register_cb(struct hci_cb
*cb
)
3596 BT_DBG("%p name %s", cb
, cb
->name
);
3598 mutex_lock(&hci_cb_list_lock
);
3599 list_add_tail(&cb
->list
, &hci_cb_list
);
3600 mutex_unlock(&hci_cb_list_lock
);
3604 EXPORT_SYMBOL(hci_register_cb
);
3606 int hci_unregister_cb(struct hci_cb
*cb
)
3608 BT_DBG("%p name %s", cb
, cb
->name
);
3610 mutex_lock(&hci_cb_list_lock
);
3611 list_del(&cb
->list
);
3612 mutex_unlock(&hci_cb_list_lock
);
3616 EXPORT_SYMBOL(hci_unregister_cb
);
3618 static void hci_send_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3622 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
3625 __net_timestamp(skb
);
3627 /* Send copy to monitor */
3628 hci_send_to_monitor(hdev
, skb
);
3630 if (atomic_read(&hdev
->promisc
)) {
3631 /* Send copy to the sockets */
3632 hci_send_to_sock(hdev
, skb
);
3635 /* Get rid of skb owner, prior to sending to the driver. */
3638 if (!test_bit(HCI_RUNNING
, &hdev
->flags
)) {
3643 err
= hdev
->send(hdev
, skb
);
3645 BT_ERR("%s sending frame failed (%d)", hdev
->name
, err
);
3650 /* Send HCI command */
3651 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
,
3654 struct sk_buff
*skb
;
3656 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
3658 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
3660 BT_ERR("%s no memory for command", hdev
->name
);
3664 /* Stand-alone HCI commands must be flagged as
3665 * single-command requests.
3667 bt_cb(skb
)->req
.start
= true;
3669 skb_queue_tail(&hdev
->cmd_q
, skb
);
3670 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3675 /* Get data from the previously sent command */
3676 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
3678 struct hci_command_hdr
*hdr
;
3680 if (!hdev
->sent_cmd
)
3683 hdr
= (void *) hdev
->sent_cmd
->data
;
3685 if (hdr
->opcode
!= cpu_to_le16(opcode
))
3688 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
3690 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
3693 /* Send HCI command and wait for command commplete event */
3694 struct sk_buff
*hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
3695 const void *param
, u32 timeout
)
3697 struct sk_buff
*skb
;
3699 if (!test_bit(HCI_UP
, &hdev
->flags
))
3700 return ERR_PTR(-ENETDOWN
);
3702 bt_dev_dbg(hdev
, "opcode 0x%4.4x plen %d", opcode
, plen
);
3705 skb
= __hci_cmd_sync(hdev
, opcode
, plen
, param
, timeout
);
3706 hci_req_unlock(hdev
);
3710 EXPORT_SYMBOL(hci_cmd_sync
);
3713 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
3715 struct hci_acl_hdr
*hdr
;
3718 skb_push(skb
, HCI_ACL_HDR_SIZE
);
3719 skb_reset_transport_header(skb
);
3720 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
3721 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
3722 hdr
->dlen
= cpu_to_le16(len
);
3725 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
3726 struct sk_buff
*skb
, __u16 flags
)
3728 struct hci_conn
*conn
= chan
->conn
;
3729 struct hci_dev
*hdev
= conn
->hdev
;
3730 struct sk_buff
*list
;
3732 skb
->len
= skb_headlen(skb
);
3735 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
3737 switch (hdev
->dev_type
) {
3739 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3742 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
3745 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
3749 list
= skb_shinfo(skb
)->frag_list
;
3751 /* Non fragmented */
3752 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
3754 skb_queue_tail(queue
, skb
);
3757 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3759 skb_shinfo(skb
)->frag_list
= NULL
;
3761 /* Queue all fragments atomically. We need to use spin_lock_bh
3762 * here because of 6LoWPAN links, as there this function is
3763 * called from softirq and using normal spin lock could cause
3766 spin_lock_bh(&queue
->lock
);
3768 __skb_queue_tail(queue
, skb
);
3770 flags
&= ~ACL_START
;
3773 skb
= list
; list
= list
->next
;
3775 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
3776 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3778 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3780 __skb_queue_tail(queue
, skb
);
3783 spin_unlock_bh(&queue
->lock
);
3787 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
3789 struct hci_dev
*hdev
= chan
->conn
->hdev
;
3791 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
3793 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
3795 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3799 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
3801 struct hci_dev
*hdev
= conn
->hdev
;
3802 struct hci_sco_hdr hdr
;
3804 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
3806 hdr
.handle
= cpu_to_le16(conn
->handle
);
3807 hdr
.dlen
= skb
->len
;
3809 skb_push(skb
, HCI_SCO_HDR_SIZE
);
3810 skb_reset_transport_header(skb
);
3811 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
3813 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
3815 skb_queue_tail(&conn
->data_q
, skb
);
3816 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3819 /* ---- HCI TX task (outgoing data) ---- */
3821 /* HCI Connection scheduler */
3822 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
3825 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3826 struct hci_conn
*conn
= NULL
, *c
;
3827 unsigned int num
= 0, min
= ~0;
3829 /* We don't have to lock device here. Connections are always
3830 * added and removed with TX task disabled. */
3834 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3835 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
3838 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
3843 if (c
->sent
< min
) {
3848 if (hci_conn_num(hdev
, type
) == num
)
3857 switch (conn
->type
) {
3859 cnt
= hdev
->acl_cnt
;
3863 cnt
= hdev
->sco_cnt
;
3866 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3870 BT_ERR("Unknown link type");
3878 BT_DBG("conn %p quote %d", conn
, *quote
);
3882 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
3884 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3887 BT_ERR("%s link tx timeout", hdev
->name
);
3891 /* Kill stalled connections */
3892 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3893 if (c
->type
== type
&& c
->sent
) {
3894 BT_ERR("%s killing stalled connection %pMR",
3895 hdev
->name
, &c
->dst
);
3896 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
3903 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
3906 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3907 struct hci_chan
*chan
= NULL
;
3908 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
3909 struct hci_conn
*conn
;
3910 int cnt
, q
, conn_num
= 0;
3912 BT_DBG("%s", hdev
->name
);
3916 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3917 struct hci_chan
*tmp
;
3919 if (conn
->type
!= type
)
3922 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3927 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
3928 struct sk_buff
*skb
;
3930 if (skb_queue_empty(&tmp
->data_q
))
3933 skb
= skb_peek(&tmp
->data_q
);
3934 if (skb
->priority
< cur_prio
)
3937 if (skb
->priority
> cur_prio
) {
3940 cur_prio
= skb
->priority
;
3945 if (conn
->sent
< min
) {
3951 if (hci_conn_num(hdev
, type
) == conn_num
)
3960 switch (chan
->conn
->type
) {
3962 cnt
= hdev
->acl_cnt
;
3965 cnt
= hdev
->block_cnt
;
3969 cnt
= hdev
->sco_cnt
;
3972 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3976 BT_ERR("Unknown link type");
3981 BT_DBG("chan %p quote %d", chan
, *quote
);
3985 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
3987 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3988 struct hci_conn
*conn
;
3991 BT_DBG("%s", hdev
->name
);
3995 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3996 struct hci_chan
*chan
;
3998 if (conn
->type
!= type
)
4001 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
4006 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
4007 struct sk_buff
*skb
;
4014 if (skb_queue_empty(&chan
->data_q
))
4017 skb
= skb_peek(&chan
->data_q
);
4018 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
4021 skb
->priority
= HCI_PRIO_MAX
- 1;
4023 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
4027 if (hci_conn_num(hdev
, type
) == num
)
4035 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4037 /* Calculate count of blocks used by this packet */
4038 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
4041 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
4043 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
4044 /* ACL tx timeout must be longer than maximum
4045 * link supervision timeout (40.9 seconds) */
4046 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
4047 HCI_ACL_TX_TIMEOUT
))
4048 hci_link_tx_to(hdev
, ACL_LINK
);
4052 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
4054 unsigned int cnt
= hdev
->acl_cnt
;
4055 struct hci_chan
*chan
;
4056 struct sk_buff
*skb
;
4059 __check_timeout(hdev
, cnt
);
4061 while (hdev
->acl_cnt
&&
4062 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
4063 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
4064 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
4065 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
4066 skb
->len
, skb
->priority
);
4068 /* Stop if priority has changed */
4069 if (skb
->priority
< priority
)
4072 skb
= skb_dequeue(&chan
->data_q
);
4074 hci_conn_enter_active_mode(chan
->conn
,
4075 bt_cb(skb
)->force_active
);
4077 hci_send_frame(hdev
, skb
);
4078 hdev
->acl_last_tx
= jiffies
;
4086 if (cnt
!= hdev
->acl_cnt
)
4087 hci_prio_recalculate(hdev
, ACL_LINK
);
4090 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
4092 unsigned int cnt
= hdev
->block_cnt
;
4093 struct hci_chan
*chan
;
4094 struct sk_buff
*skb
;
4098 __check_timeout(hdev
, cnt
);
4100 BT_DBG("%s", hdev
->name
);
4102 if (hdev
->dev_type
== HCI_AMP
)
4107 while (hdev
->block_cnt
> 0 &&
4108 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
4109 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
4110 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
4113 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
4114 skb
->len
, skb
->priority
);
4116 /* Stop if priority has changed */
4117 if (skb
->priority
< priority
)
4120 skb
= skb_dequeue(&chan
->data_q
);
4122 blocks
= __get_blocks(hdev
, skb
);
4123 if (blocks
> hdev
->block_cnt
)
4126 hci_conn_enter_active_mode(chan
->conn
,
4127 bt_cb(skb
)->force_active
);
4129 hci_send_frame(hdev
, skb
);
4130 hdev
->acl_last_tx
= jiffies
;
4132 hdev
->block_cnt
-= blocks
;
4135 chan
->sent
+= blocks
;
4136 chan
->conn
->sent
+= blocks
;
4140 if (cnt
!= hdev
->block_cnt
)
4141 hci_prio_recalculate(hdev
, type
);
4144 static void hci_sched_acl(struct hci_dev
*hdev
)
4146 BT_DBG("%s", hdev
->name
);
4148 /* No ACL link over BR/EDR controller */
4149 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
4152 /* No AMP link over AMP controller */
4153 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
4156 switch (hdev
->flow_ctl_mode
) {
4157 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
4158 hci_sched_acl_pkt(hdev
);
4161 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
4162 hci_sched_acl_blk(hdev
);
4168 static void hci_sched_sco(struct hci_dev
*hdev
)
4170 struct hci_conn
*conn
;
4171 struct sk_buff
*skb
;
4174 BT_DBG("%s", hdev
->name
);
4176 if (!hci_conn_num(hdev
, SCO_LINK
))
4179 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
4180 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
4181 BT_DBG("skb %p len %d", skb
, skb
->len
);
4182 hci_send_frame(hdev
, skb
);
4185 if (conn
->sent
== ~0)
4191 static void hci_sched_esco(struct hci_dev
*hdev
)
4193 struct hci_conn
*conn
;
4194 struct sk_buff
*skb
;
4197 BT_DBG("%s", hdev
->name
);
4199 if (!hci_conn_num(hdev
, ESCO_LINK
))
4202 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
4204 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
4205 BT_DBG("skb %p len %d", skb
, skb
->len
);
4206 hci_send_frame(hdev
, skb
);
4209 if (conn
->sent
== ~0)
/* Schedule LE data transmission for @hdev.
 *
 * Sends queued LE frames per-channel in priority order, bounded by the
 * controller's free buffer count. Controllers without a dedicated LE
 * buffer pool (le_pkts == 0) borrow from the ACL pool, which is why the
 * final count is written back to either le_cnt or acl_cnt.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL buffer budget when the controller has no
	 * separate LE buffers.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Only dequeue once we know the frame will be sent */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Give the unused budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
/* TX work callback: push all queued outbound traffic to the driver.
 *
 * Runs the per-link-type schedulers unless the device is bound to a
 * user channel (in which case userspace owns scheduling and only the
 * raw queue is drained here).
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4287 /* ----- HCI RX task (incoming data processing) ----- */
4289 /* ACL data packet */
/* Process one inbound ACL data packet.
 *
 * Strips the ACL header, resolves the connection handle and hands the
 * payload to L2CAP. Takes ownership of @skb: it is either consumed by
 * l2cap_recv_acldata() or freed here when no connection matches.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field carries the connection handle in the
	 * low bits and the packet boundary/broadcast flags in the top
	 * bits; split them apart.
	 */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup only needs the lock; the conn itself outlives it here */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4325 /* SCO data packet */
/* Process one inbound SCO data packet.
 *
 * Strips the SCO header, resolves the connection handle and hands the
 * payload to the SCO layer. Takes ownership of @skb: it is either
 * consumed by sco_recv_scodata() or freed here on an unknown handle.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4356 static bool hci_req_is_complete(struct hci_dev
*hdev
)
4358 struct sk_buff
*skb
;
4360 skb
= skb_peek(&hdev
->cmd_q
);
4364 return bt_cb(skb
)->req
.start
;
4367 static void hci_resend_last(struct hci_dev
*hdev
)
4369 struct hci_command_hdr
*sent
;
4370 struct sk_buff
*skb
;
4373 if (!hdev
->sent_cmd
)
4376 sent
= (void *) hdev
->sent_cmd
->data
;
4377 opcode
= __le16_to_cpu(sent
->opcode
);
4378 if (opcode
== HCI_OP_RESET
)
4381 skb
= skb_clone(hdev
->sent_cmd
, GFP_KERNEL
);
4385 skb_queue_head(&hdev
->cmd_q
, skb
);
4386 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
/* Resolve the completion callback(s) for a finished HCI command.
 *
 * @hdev:             the controller the event arrived on
 * @opcode:           opcode of the command that completed
 * @status:           HCI status of the completion
 * @req_complete:     out: per-request completion callback, if any
 * @req_complete_skb: out: skb-carrying completion callback, if any
 *
 * On a failed command, or on the last command of a request, this also
 * flushes the remaining queued commands that belong to the same request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A new request starts here: put the skb back and stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
/* RX work callback: process every packet queued on hdev->rx_q.
 *
 * Each skb is mirrored to the monitor socket (and to raw sockets when
 * in promiscuous mode) before being dispatched by packet type. All
 * paths consume the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* User channel: userspace handles everything itself */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
/* CMD work callback: send the next queued HCI command to the driver.
 *
 * Only sends when the controller still accepts commands (cmd_cnt > 0).
 * A clone of the outgoing command is kept in hdev->sent_cmd so its
 * completion can be matched later; if cloning fails the command is put
 * back at the head of the queue and the work is rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous reference before taking a new one */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no timeout is armed; otherwise start
			 * the command timeout watchdog.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry from the workqueue */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}