/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
44 static void hci_rx_work(struct work_struct
*work
);
45 static void hci_cmd_work(struct work_struct
*work
);
46 static void hci_tx_work(struct work_struct
*work
);
49 LIST_HEAD(hci_dev_list
);
50 DEFINE_RWLOCK(hci_dev_list_lock
);
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list
);
54 DEFINE_MUTEX(hci_cb_list_lock
);
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida
);
59 /* ----- HCI requests ----- */
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
/* ---- HCI notifications ---- */

/* Forward a device event (up/down, register/unregister) to the HCI
 * socket layer so monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}
75 /* ---- HCI debugfs entries ---- */
77 static ssize_t
dut_mode_read(struct file
*file
, char __user
*user_buf
,
78 size_t count
, loff_t
*ppos
)
80 struct hci_dev
*hdev
= file
->private_data
;
83 buf
[0] = hci_dev_test_flag(hdev
, HCI_DUT_MODE
) ? 'Y': 'N';
86 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
89 static ssize_t
dut_mode_write(struct file
*file
, const char __user
*user_buf
,
90 size_t count
, loff_t
*ppos
)
92 struct hci_dev
*hdev
= file
->private_data
;
95 size_t buf_size
= min(count
, (sizeof(buf
)-1));
98 if (!test_bit(HCI_UP
, &hdev
->flags
))
101 if (copy_from_user(buf
, user_buf
, buf_size
))
104 buf
[buf_size
] = '\0';
105 if (strtobool(buf
, &enable
))
108 if (enable
== hci_dev_test_flag(hdev
, HCI_DUT_MODE
))
113 skb
= __hci_cmd_sync(hdev
, HCI_OP_ENABLE_DUT_MODE
, 0, NULL
,
116 skb
= __hci_cmd_sync(hdev
, HCI_OP_RESET
, 0, NULL
,
118 hci_req_unlock(hdev
);
125 hci_dev_change_flag(hdev
, HCI_DUT_MODE
);
130 static const struct file_operations dut_mode_fops
= {
132 .read
= dut_mode_read
,
133 .write
= dut_mode_write
,
134 .llseek
= default_llseek
,
137 static ssize_t
vendor_diag_read(struct file
*file
, char __user
*user_buf
,
138 size_t count
, loff_t
*ppos
)
140 struct hci_dev
*hdev
= file
->private_data
;
143 buf
[0] = hci_dev_test_flag(hdev
, HCI_VENDOR_DIAG
) ? 'Y': 'N';
146 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
149 static ssize_t
vendor_diag_write(struct file
*file
, const char __user
*user_buf
,
150 size_t count
, loff_t
*ppos
)
152 struct hci_dev
*hdev
= file
->private_data
;
154 size_t buf_size
= min(count
, (sizeof(buf
)-1));
158 if (copy_from_user(buf
, user_buf
, buf_size
))
161 buf
[buf_size
] = '\0';
162 if (strtobool(buf
, &enable
))
166 err
= hdev
->set_diag(hdev
, enable
);
167 hci_req_unlock(hdev
);
173 hci_dev_set_flag(hdev
, HCI_VENDOR_DIAG
);
175 hci_dev_clear_flag(hdev
, HCI_VENDOR_DIAG
);
180 static const struct file_operations vendor_diag_fops
= {
182 .read
= vendor_diag_read
,
183 .write
= vendor_diag_write
,
184 .llseek
= default_llseek
,
187 static void hci_debugfs_create_basic(struct hci_dev
*hdev
)
189 debugfs_create_file("dut_mode", 0644, hdev
->debugfs
, hdev
,
193 debugfs_create_file("vendor_diag", 0644, hdev
->debugfs
, hdev
,
197 /* ---- HCI requests ---- */
199 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
, u16 opcode
,
202 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
204 if (hdev
->req_status
== HCI_REQ_PEND
) {
205 hdev
->req_result
= result
;
206 hdev
->req_status
= HCI_REQ_DONE
;
208 hdev
->req_skb
= skb_get(skb
);
209 wake_up_interruptible(&hdev
->req_wait_q
);
213 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
215 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
217 if (hdev
->req_status
== HCI_REQ_PEND
) {
218 hdev
->req_result
= err
;
219 hdev
->req_status
= HCI_REQ_CANCELED
;
220 wake_up_interruptible(&hdev
->req_wait_q
);
224 struct sk_buff
*__hci_cmd_sync_ev(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
225 const void *param
, u8 event
, u32 timeout
)
227 DECLARE_WAITQUEUE(wait
, current
);
228 struct hci_request req
;
232 BT_DBG("%s", hdev
->name
);
234 hci_req_init(&req
, hdev
);
236 hci_req_add_ev(&req
, opcode
, plen
, param
, event
);
238 hdev
->req_status
= HCI_REQ_PEND
;
240 add_wait_queue(&hdev
->req_wait_q
, &wait
);
241 set_current_state(TASK_INTERRUPTIBLE
);
243 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
245 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
246 set_current_state(TASK_RUNNING
);
250 schedule_timeout(timeout
);
252 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
254 if (signal_pending(current
))
255 return ERR_PTR(-EINTR
);
257 switch (hdev
->req_status
) {
259 err
= -bt_to_errno(hdev
->req_result
);
262 case HCI_REQ_CANCELED
:
263 err
= -hdev
->req_result
;
271 hdev
->req_status
= hdev
->req_result
= 0;
273 hdev
->req_skb
= NULL
;
275 BT_DBG("%s end: err %d", hdev
->name
, err
);
283 return ERR_PTR(-ENODATA
);
287 EXPORT_SYMBOL(__hci_cmd_sync_ev
);
289 struct sk_buff
*__hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
290 const void *param
, u32 timeout
)
292 return __hci_cmd_sync_ev(hdev
, opcode
, plen
, param
, 0, timeout
);
294 EXPORT_SYMBOL(__hci_cmd_sync
);
296 /* Execute request and wait for completion. */
297 static int __hci_req_sync(struct hci_dev
*hdev
,
298 void (*func
)(struct hci_request
*req
,
300 unsigned long opt
, __u32 timeout
)
302 struct hci_request req
;
303 DECLARE_WAITQUEUE(wait
, current
);
306 BT_DBG("%s start", hdev
->name
);
308 hci_req_init(&req
, hdev
);
310 hdev
->req_status
= HCI_REQ_PEND
;
314 add_wait_queue(&hdev
->req_wait_q
, &wait
);
315 set_current_state(TASK_INTERRUPTIBLE
);
317 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
319 hdev
->req_status
= 0;
321 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
322 set_current_state(TASK_RUNNING
);
324 /* ENODATA means the HCI request command queue is empty.
325 * This can happen when a request with conditionals doesn't
326 * trigger any commands to be sent. This is normal behavior
327 * and should not trigger an error return.
335 schedule_timeout(timeout
);
337 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
339 if (signal_pending(current
))
342 switch (hdev
->req_status
) {
344 err
= -bt_to_errno(hdev
->req_result
);
347 case HCI_REQ_CANCELED
:
348 err
= -hdev
->req_result
;
356 hdev
->req_status
= hdev
->req_result
= 0;
358 BT_DBG("%s end: err %d", hdev
->name
, err
);
363 static int hci_req_sync(struct hci_dev
*hdev
,
364 void (*req
)(struct hci_request
*req
,
366 unsigned long opt
, __u32 timeout
)
370 if (!test_bit(HCI_UP
, &hdev
->flags
))
373 /* Serialize all requests */
375 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
);
376 hci_req_unlock(hdev
);
381 static void hci_reset_req(struct hci_request
*req
, unsigned long opt
)
383 BT_DBG("%s %ld", req
->hdev
->name
, opt
);
386 set_bit(HCI_RESET
, &req
->hdev
->flags
);
387 hci_req_add(req
, HCI_OP_RESET
, 0, NULL
);
390 static void bredr_init(struct hci_request
*req
)
392 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
394 /* Read Local Supported Features */
395 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
397 /* Read Local Version */
398 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
400 /* Read BD Address */
401 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
404 static void amp_init1(struct hci_request
*req
)
406 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
408 /* Read Local Version */
409 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
411 /* Read Local Supported Commands */
412 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
414 /* Read Local AMP Info */
415 hci_req_add(req
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
417 /* Read Data Blk size */
418 hci_req_add(req
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
420 /* Read Flow Control Mode */
421 hci_req_add(req
, HCI_OP_READ_FLOW_CONTROL_MODE
, 0, NULL
);
423 /* Read Location Data */
424 hci_req_add(req
, HCI_OP_READ_LOCATION_DATA
, 0, NULL
);
427 static void amp_init2(struct hci_request
*req
)
429 /* Read Local Supported Features. Not all AMP controllers
430 * support this so it's placed conditionally in the second
433 if (req
->hdev
->commands
[14] & 0x20)
434 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
437 static void hci_init1_req(struct hci_request
*req
, unsigned long opt
)
439 struct hci_dev
*hdev
= req
->hdev
;
441 BT_DBG("%s %ld", hdev
->name
, opt
);
444 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
445 hci_reset_req(req
, 0);
447 switch (hdev
->dev_type
) {
457 BT_ERR("Unknown device type %d", hdev
->dev_type
);
462 static void bredr_setup(struct hci_request
*req
)
467 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
468 hci_req_add(req
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
470 /* Read Class of Device */
471 hci_req_add(req
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
473 /* Read Local Name */
474 hci_req_add(req
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
476 /* Read Voice Setting */
477 hci_req_add(req
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
479 /* Read Number of Supported IAC */
480 hci_req_add(req
, HCI_OP_READ_NUM_SUPPORTED_IAC
, 0, NULL
);
482 /* Read Current IAC LAP */
483 hci_req_add(req
, HCI_OP_READ_CURRENT_IAC_LAP
, 0, NULL
);
485 /* Clear Event Filters */
486 flt_type
= HCI_FLT_CLEAR_ALL
;
487 hci_req_add(req
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
489 /* Connection accept timeout ~20 secs */
490 param
= cpu_to_le16(0x7d00);
491 hci_req_add(req
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
494 static void le_setup(struct hci_request
*req
)
496 struct hci_dev
*hdev
= req
->hdev
;
498 /* Read LE Buffer Size */
499 hci_req_add(req
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
501 /* Read LE Local Supported Features */
502 hci_req_add(req
, HCI_OP_LE_READ_LOCAL_FEATURES
, 0, NULL
);
504 /* Read LE Supported States */
505 hci_req_add(req
, HCI_OP_LE_READ_SUPPORTED_STATES
, 0, NULL
);
507 /* Read LE White List Size */
508 hci_req_add(req
, HCI_OP_LE_READ_WHITE_LIST_SIZE
, 0, NULL
);
510 /* Clear LE White List */
511 hci_req_add(req
, HCI_OP_LE_CLEAR_WHITE_LIST
, 0, NULL
);
513 /* LE-only controllers have LE implicitly enabled */
514 if (!lmp_bredr_capable(hdev
))
515 hci_dev_set_flag(hdev
, HCI_LE_ENABLED
);
518 static void hci_setup_event_mask(struct hci_request
*req
)
520 struct hci_dev
*hdev
= req
->hdev
;
522 /* The second byte is 0xff instead of 0x9f (two reserved bits
523 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
526 u8 events
[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
528 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
529 * any event mask for pre 1.2 devices.
531 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
534 if (lmp_bredr_capable(hdev
)) {
535 events
[4] |= 0x01; /* Flow Specification Complete */
536 events
[4] |= 0x02; /* Inquiry Result with RSSI */
537 events
[4] |= 0x04; /* Read Remote Extended Features Complete */
538 events
[5] |= 0x08; /* Synchronous Connection Complete */
539 events
[5] |= 0x10; /* Synchronous Connection Changed */
541 /* Use a different default for LE-only devices */
542 memset(events
, 0, sizeof(events
));
543 events
[0] |= 0x10; /* Disconnection Complete */
544 events
[1] |= 0x08; /* Read Remote Version Information Complete */
545 events
[1] |= 0x20; /* Command Complete */
546 events
[1] |= 0x40; /* Command Status */
547 events
[1] |= 0x80; /* Hardware Error */
548 events
[2] |= 0x04; /* Number of Completed Packets */
549 events
[3] |= 0x02; /* Data Buffer Overflow */
551 if (hdev
->le_features
[0] & HCI_LE_ENCRYPTION
) {
552 events
[0] |= 0x80; /* Encryption Change */
553 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
557 if (lmp_inq_rssi_capable(hdev
))
558 events
[4] |= 0x02; /* Inquiry Result with RSSI */
560 if (lmp_sniffsubr_capable(hdev
))
561 events
[5] |= 0x20; /* Sniff Subrating */
563 if (lmp_pause_enc_capable(hdev
))
564 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
566 if (lmp_ext_inq_capable(hdev
))
567 events
[5] |= 0x40; /* Extended Inquiry Result */
569 if (lmp_no_flush_capable(hdev
))
570 events
[7] |= 0x01; /* Enhanced Flush Complete */
572 if (lmp_lsto_capable(hdev
))
573 events
[6] |= 0x80; /* Link Supervision Timeout Changed */
575 if (lmp_ssp_capable(hdev
)) {
576 events
[6] |= 0x01; /* IO Capability Request */
577 events
[6] |= 0x02; /* IO Capability Response */
578 events
[6] |= 0x04; /* User Confirmation Request */
579 events
[6] |= 0x08; /* User Passkey Request */
580 events
[6] |= 0x10; /* Remote OOB Data Request */
581 events
[6] |= 0x20; /* Simple Pairing Complete */
582 events
[7] |= 0x04; /* User Passkey Notification */
583 events
[7] |= 0x08; /* Keypress Notification */
584 events
[7] |= 0x10; /* Remote Host Supported
585 * Features Notification
589 if (lmp_le_capable(hdev
))
590 events
[7] |= 0x20; /* LE Meta-Event */
592 hci_req_add(req
, HCI_OP_SET_EVENT_MASK
, sizeof(events
), events
);
595 static void hci_init2_req(struct hci_request
*req
, unsigned long opt
)
597 struct hci_dev
*hdev
= req
->hdev
;
599 if (hdev
->dev_type
== HCI_AMP
)
600 return amp_init2(req
);
602 if (lmp_bredr_capable(hdev
))
605 hci_dev_clear_flag(hdev
, HCI_BREDR_ENABLED
);
607 if (lmp_le_capable(hdev
))
610 /* All Bluetooth 1.2 and later controllers should support the
611 * HCI command for reading the local supported commands.
613 * Unfortunately some controllers indicate Bluetooth 1.2 support,
614 * but do not have support for this command. If that is the case,
615 * the driver can quirk the behavior and skip reading the local
616 * supported commands.
618 if (hdev
->hci_ver
> BLUETOOTH_VER_1_1
&&
619 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS
, &hdev
->quirks
))
620 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
622 if (lmp_ssp_capable(hdev
)) {
623 /* When SSP is available, then the host features page
624 * should also be available as well. However some
625 * controllers list the max_page as 0 as long as SSP
626 * has not been enabled. To achieve proper debugging
627 * output, force the minimum max_page to 1 at least.
629 hdev
->max_page
= 0x01;
631 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
634 hci_req_add(req
, HCI_OP_WRITE_SSP_MODE
,
635 sizeof(mode
), &mode
);
637 struct hci_cp_write_eir cp
;
639 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
640 memset(&cp
, 0, sizeof(cp
));
642 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
646 if (lmp_inq_rssi_capable(hdev
) ||
647 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE
, &hdev
->quirks
)) {
650 /* If Extended Inquiry Result events are supported, then
651 * they are clearly preferred over Inquiry Result with RSSI
654 mode
= lmp_ext_inq_capable(hdev
) ? 0x02 : 0x01;
656 hci_req_add(req
, HCI_OP_WRITE_INQUIRY_MODE
, 1, &mode
);
659 if (lmp_inq_tx_pwr_capable(hdev
))
660 hci_req_add(req
, HCI_OP_READ_INQ_RSP_TX_POWER
, 0, NULL
);
662 if (lmp_ext_feat_capable(hdev
)) {
663 struct hci_cp_read_local_ext_features cp
;
666 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
670 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
672 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(enable
),
677 static void hci_setup_link_policy(struct hci_request
*req
)
679 struct hci_dev
*hdev
= req
->hdev
;
680 struct hci_cp_write_def_link_policy cp
;
683 if (lmp_rswitch_capable(hdev
))
684 link_policy
|= HCI_LP_RSWITCH
;
685 if (lmp_hold_capable(hdev
))
686 link_policy
|= HCI_LP_HOLD
;
687 if (lmp_sniff_capable(hdev
))
688 link_policy
|= HCI_LP_SNIFF
;
689 if (lmp_park_capable(hdev
))
690 link_policy
|= HCI_LP_PARK
;
692 cp
.policy
= cpu_to_le16(link_policy
);
693 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, sizeof(cp
), &cp
);
696 static void hci_set_le_support(struct hci_request
*req
)
698 struct hci_dev
*hdev
= req
->hdev
;
699 struct hci_cp_write_le_host_supported cp
;
701 /* LE-only devices do not support explicit enablement */
702 if (!lmp_bredr_capable(hdev
))
705 memset(&cp
, 0, sizeof(cp
));
707 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
712 if (cp
.le
!= lmp_host_le_capable(hdev
))
713 hci_req_add(req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(cp
),
717 static void hci_set_event_mask_page_2(struct hci_request
*req
)
719 struct hci_dev
*hdev
= req
->hdev
;
720 u8 events
[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
722 /* If Connectionless Slave Broadcast master role is supported
723 * enable all necessary events for it.
725 if (lmp_csb_master_capable(hdev
)) {
726 events
[1] |= 0x40; /* Triggered Clock Capture */
727 events
[1] |= 0x80; /* Synchronization Train Complete */
728 events
[2] |= 0x10; /* Slave Page Response Timeout */
729 events
[2] |= 0x20; /* CSB Channel Map Change */
732 /* If Connectionless Slave Broadcast slave role is supported
733 * enable all necessary events for it.
735 if (lmp_csb_slave_capable(hdev
)) {
736 events
[2] |= 0x01; /* Synchronization Train Received */
737 events
[2] |= 0x02; /* CSB Receive */
738 events
[2] |= 0x04; /* CSB Timeout */
739 events
[2] |= 0x08; /* Truncated Page Complete */
742 /* Enable Authenticated Payload Timeout Expired event if supported */
743 if (lmp_ping_capable(hdev
) || hdev
->le_features
[0] & HCI_LE_PING
)
746 hci_req_add(req
, HCI_OP_SET_EVENT_MASK_PAGE_2
, sizeof(events
), events
);
749 static void hci_init3_req(struct hci_request
*req
, unsigned long opt
)
751 struct hci_dev
*hdev
= req
->hdev
;
754 hci_setup_event_mask(req
);
756 if (hdev
->commands
[6] & 0x20 &&
757 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY
, &hdev
->quirks
)) {
758 struct hci_cp_read_stored_link_key cp
;
760 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
762 hci_req_add(req
, HCI_OP_READ_STORED_LINK_KEY
, sizeof(cp
), &cp
);
765 if (hdev
->commands
[5] & 0x10)
766 hci_setup_link_policy(req
);
768 if (hdev
->commands
[8] & 0x01)
769 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_ACTIVITY
, 0, NULL
);
771 /* Some older Broadcom based Bluetooth 1.2 controllers do not
772 * support the Read Page Scan Type command. Check support for
773 * this command in the bit mask of supported commands.
775 if (hdev
->commands
[13] & 0x01)
776 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_TYPE
, 0, NULL
);
778 if (lmp_le_capable(hdev
)) {
781 memset(events
, 0, sizeof(events
));
784 if (hdev
->le_features
[0] & HCI_LE_ENCRYPTION
)
785 events
[0] |= 0x10; /* LE Long Term Key Request */
787 /* If controller supports the Connection Parameters Request
788 * Link Layer Procedure, enable the corresponding event.
790 if (hdev
->le_features
[0] & HCI_LE_CONN_PARAM_REQ_PROC
)
791 events
[0] |= 0x20; /* LE Remote Connection
795 /* If the controller supports the Data Length Extension
796 * feature, enable the corresponding event.
798 if (hdev
->le_features
[0] & HCI_LE_DATA_LEN_EXT
)
799 events
[0] |= 0x40; /* LE Data Length Change */
801 /* If the controller supports Extended Scanner Filter
802 * Policies, enable the correspondig event.
804 if (hdev
->le_features
[0] & HCI_LE_EXT_SCAN_POLICY
)
805 events
[1] |= 0x04; /* LE Direct Advertising
809 /* If the controller supports the LE Read Local P-256
810 * Public Key command, enable the corresponding event.
812 if (hdev
->commands
[34] & 0x02)
813 events
[0] |= 0x80; /* LE Read Local P-256
814 * Public Key Complete
817 /* If the controller supports the LE Generate DHKey
818 * command, enable the corresponding event.
820 if (hdev
->commands
[34] & 0x04)
821 events
[1] |= 0x01; /* LE Generate DHKey Complete */
823 hci_req_add(req
, HCI_OP_LE_SET_EVENT_MASK
, sizeof(events
),
826 if (hdev
->commands
[25] & 0x40) {
827 /* Read LE Advertising Channel TX Power */
828 hci_req_add(req
, HCI_OP_LE_READ_ADV_TX_POWER
, 0, NULL
);
831 if (hdev
->le_features
[0] & HCI_LE_DATA_LEN_EXT
) {
832 /* Read LE Maximum Data Length */
833 hci_req_add(req
, HCI_OP_LE_READ_MAX_DATA_LEN
, 0, NULL
);
835 /* Read LE Suggested Default Data Length */
836 hci_req_add(req
, HCI_OP_LE_READ_DEF_DATA_LEN
, 0, NULL
);
839 hci_set_le_support(req
);
842 /* Read features beyond page 1 if available */
843 for (p
= 2; p
< HCI_MAX_PAGES
&& p
<= hdev
->max_page
; p
++) {
844 struct hci_cp_read_local_ext_features cp
;
847 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
852 static void hci_init4_req(struct hci_request
*req
, unsigned long opt
)
854 struct hci_dev
*hdev
= req
->hdev
;
856 /* Some Broadcom based Bluetooth controllers do not support the
857 * Delete Stored Link Key command. They are clearly indicating its
858 * absence in the bit mask of supported commands.
860 * Check the supported commands and only if the the command is marked
861 * as supported send it. If not supported assume that the controller
862 * does not have actual support for stored link keys which makes this
863 * command redundant anyway.
865 * Some controllers indicate that they support handling deleting
866 * stored link keys, but they don't. The quirk lets a driver
867 * just disable this command.
869 if (hdev
->commands
[6] & 0x80 &&
870 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY
, &hdev
->quirks
)) {
871 struct hci_cp_delete_stored_link_key cp
;
873 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
874 cp
.delete_all
= 0x01;
875 hci_req_add(req
, HCI_OP_DELETE_STORED_LINK_KEY
,
879 /* Set event mask page 2 if the HCI command for it is supported */
880 if (hdev
->commands
[22] & 0x04)
881 hci_set_event_mask_page_2(req
);
883 /* Read local codec list if the HCI command is supported */
884 if (hdev
->commands
[29] & 0x20)
885 hci_req_add(req
, HCI_OP_READ_LOCAL_CODECS
, 0, NULL
);
887 /* Get MWS transport configuration if the HCI command is supported */
888 if (hdev
->commands
[30] & 0x08)
889 hci_req_add(req
, HCI_OP_GET_MWS_TRANSPORT_CONFIG
, 0, NULL
);
891 /* Check for Synchronization Train support */
892 if (lmp_sync_train_capable(hdev
))
893 hci_req_add(req
, HCI_OP_READ_SYNC_TRAIN_PARAMS
, 0, NULL
);
895 /* Enable Secure Connections if supported and configured */
896 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
) &&
897 bredr_sc_enabled(hdev
)) {
900 hci_req_add(req
, HCI_OP_WRITE_SC_SUPPORT
,
901 sizeof(support
), &support
);
905 static int __hci_init(struct hci_dev
*hdev
)
909 err
= __hci_req_sync(hdev
, hci_init1_req
, 0, HCI_INIT_TIMEOUT
);
913 if (hci_dev_test_flag(hdev
, HCI_SETUP
))
914 hci_debugfs_create_basic(hdev
);
916 err
= __hci_req_sync(hdev
, hci_init2_req
, 0, HCI_INIT_TIMEOUT
);
920 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
921 * BR/EDR/LE type controllers. AMP controllers only need the
922 * first two stages of init.
924 if (hdev
->dev_type
!= HCI_BREDR
)
927 err
= __hci_req_sync(hdev
, hci_init3_req
, 0, HCI_INIT_TIMEOUT
);
931 err
= __hci_req_sync(hdev
, hci_init4_req
, 0, HCI_INIT_TIMEOUT
);
935 /* This function is only called when the controller is actually in
936 * configured state. When the controller is marked as unconfigured,
937 * this initialization procedure is not run.
939 * It means that it is possible that a controller runs through its
940 * setup phase and then discovers missing settings. If that is the
941 * case, then this function will not be called. It then will only
942 * be called during the config phase.
944 * So only when in setup phase or config phase, create the debugfs
945 * entries and register the SMP channels.
947 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
948 !hci_dev_test_flag(hdev
, HCI_CONFIG
))
951 hci_debugfs_create_common(hdev
);
953 if (lmp_bredr_capable(hdev
))
954 hci_debugfs_create_bredr(hdev
);
956 if (lmp_le_capable(hdev
))
957 hci_debugfs_create_le(hdev
);
962 static void hci_init0_req(struct hci_request
*req
, unsigned long opt
)
964 struct hci_dev
*hdev
= req
->hdev
;
966 BT_DBG("%s %ld", hdev
->name
, opt
);
969 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
970 hci_reset_req(req
, 0);
972 /* Read Local Version */
973 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
975 /* Read BD Address */
976 if (hdev
->set_bdaddr
)
977 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
980 static int __hci_unconf_init(struct hci_dev
*hdev
)
984 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
987 err
= __hci_req_sync(hdev
, hci_init0_req
, 0, HCI_INIT_TIMEOUT
);
991 if (hci_dev_test_flag(hdev
, HCI_SETUP
))
992 hci_debugfs_create_basic(hdev
);
997 static void hci_scan_req(struct hci_request
*req
, unsigned long opt
)
1001 BT_DBG("%s %x", req
->hdev
->name
, scan
);
1003 /* Inquiry and Page scans */
1004 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1007 static void hci_auth_req(struct hci_request
*req
, unsigned long opt
)
1011 BT_DBG("%s %x", req
->hdev
->name
, auth
);
1013 /* Authentication */
1014 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
1017 static void hci_encrypt_req(struct hci_request
*req
, unsigned long opt
)
1021 BT_DBG("%s %x", req
->hdev
->name
, encrypt
);
1024 hci_req_add(req
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
1027 static void hci_linkpol_req(struct hci_request
*req
, unsigned long opt
)
1029 __le16 policy
= cpu_to_le16(opt
);
1031 BT_DBG("%s %x", req
->hdev
->name
, policy
);
1033 /* Default link policy */
1034 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
1037 /* Get HCI device by index.
1038 * Device is held on return. */
1039 struct hci_dev
*hci_dev_get(int index
)
1041 struct hci_dev
*hdev
= NULL
, *d
;
1043 BT_DBG("%d", index
);
1048 read_lock(&hci_dev_list_lock
);
1049 list_for_each_entry(d
, &hci_dev_list
, list
) {
1050 if (d
->id
== index
) {
1051 hdev
= hci_dev_hold(d
);
1055 read_unlock(&hci_dev_list_lock
);
1059 /* ---- Inquiry support ---- */
1061 bool hci_discovery_active(struct hci_dev
*hdev
)
1063 struct discovery_state
*discov
= &hdev
->discovery
;
1065 switch (discov
->state
) {
1066 case DISCOVERY_FINDING
:
1067 case DISCOVERY_RESOLVING
:
1075 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
1077 int old_state
= hdev
->discovery
.state
;
1079 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
1081 if (old_state
== state
)
1084 hdev
->discovery
.state
= state
;
1087 case DISCOVERY_STOPPED
:
1088 hci_update_background_scan(hdev
);
1090 if (old_state
!= DISCOVERY_STARTING
)
1091 mgmt_discovering(hdev
, 0);
1093 case DISCOVERY_STARTING
:
1095 case DISCOVERY_FINDING
:
1096 mgmt_discovering(hdev
, 1);
1098 case DISCOVERY_RESOLVING
:
1100 case DISCOVERY_STOPPING
:
1105 void hci_inquiry_cache_flush(struct hci_dev
*hdev
)
1107 struct discovery_state
*cache
= &hdev
->discovery
;
1108 struct inquiry_entry
*p
, *n
;
1110 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
1115 INIT_LIST_HEAD(&cache
->unknown
);
1116 INIT_LIST_HEAD(&cache
->resolve
);
1119 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
1122 struct discovery_state
*cache
= &hdev
->discovery
;
1123 struct inquiry_entry
*e
;
1125 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1127 list_for_each_entry(e
, &cache
->all
, all
) {
1128 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1135 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
1138 struct discovery_state
*cache
= &hdev
->discovery
;
1139 struct inquiry_entry
*e
;
1141 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1143 list_for_each_entry(e
, &cache
->unknown
, list
) {
1144 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1151 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
1155 struct discovery_state
*cache
= &hdev
->discovery
;
1156 struct inquiry_entry
*e
;
1158 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
1160 list_for_each_entry(e
, &cache
->resolve
, list
) {
1161 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
1163 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1170 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
1171 struct inquiry_entry
*ie
)
1173 struct discovery_state
*cache
= &hdev
->discovery
;
1174 struct list_head
*pos
= &cache
->resolve
;
1175 struct inquiry_entry
*p
;
1177 list_del(&ie
->list
);
1179 list_for_each_entry(p
, &cache
->resolve
, list
) {
1180 if (p
->name_state
!= NAME_PENDING
&&
1181 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
1186 list_add(&ie
->list
, pos
);
1189 u32
hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
1192 struct discovery_state
*cache
= &hdev
->discovery
;
1193 struct inquiry_entry
*ie
;
1196 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
1198 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
, BDADDR_BREDR
);
1200 if (!data
->ssp_mode
)
1201 flags
|= MGMT_DEV_FOUND_LEGACY_PAIRING
;
1203 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
1205 if (!ie
->data
.ssp_mode
)
1206 flags
|= MGMT_DEV_FOUND_LEGACY_PAIRING
;
1208 if (ie
->name_state
== NAME_NEEDED
&&
1209 data
->rssi
!= ie
->data
.rssi
) {
1210 ie
->data
.rssi
= data
->rssi
;
1211 hci_inquiry_cache_update_resolve(hdev
, ie
);
1217 /* Entry not in the cache. Add new one. */
1218 ie
= kzalloc(sizeof(*ie
), GFP_KERNEL
);
1220 flags
|= MGMT_DEV_FOUND_CONFIRM_NAME
;
1224 list_add(&ie
->all
, &cache
->all
);
1227 ie
->name_state
= NAME_KNOWN
;
1229 ie
->name_state
= NAME_NOT_KNOWN
;
1230 list_add(&ie
->list
, &cache
->unknown
);
1234 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
1235 ie
->name_state
!= NAME_PENDING
) {
1236 ie
->name_state
= NAME_KNOWN
;
1237 list_del(&ie
->list
);
1240 memcpy(&ie
->data
, data
, sizeof(*data
));
1241 ie
->timestamp
= jiffies
;
1242 cache
->timestamp
= jiffies
;
1244 if (ie
->name_state
== NAME_NOT_KNOWN
)
1245 flags
|= MGMT_DEV_FOUND_CONFIRM_NAME
;
1251 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
1253 struct discovery_state
*cache
= &hdev
->discovery
;
1254 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
1255 struct inquiry_entry
*e
;
1258 list_for_each_entry(e
, &cache
->all
, all
) {
1259 struct inquiry_data
*data
= &e
->data
;
1264 bacpy(&info
->bdaddr
, &data
->bdaddr
);
1265 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
1266 info
->pscan_period_mode
= data
->pscan_period_mode
;
1267 info
->pscan_mode
= data
->pscan_mode
;
1268 memcpy(info
->dev_class
, data
->dev_class
, 3);
1269 info
->clock_offset
= data
->clock_offset
;
1275 BT_DBG("cache %p, copied %d", cache
, copied
);
1279 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
1281 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
1282 struct hci_dev
*hdev
= req
->hdev
;
1283 struct hci_cp_inquiry cp
;
1285 BT_DBG("%s", hdev
->name
);
1287 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
1291 memcpy(&cp
.lap
, &ir
->lap
, 3);
1292 cp
.length
= ir
->length
;
1293 cp
.num_rsp
= ir
->num_rsp
;
1294 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
1297 int hci_inquiry(void __user
*arg
)
1299 __u8 __user
*ptr
= arg
;
1300 struct hci_inquiry_req ir
;
1301 struct hci_dev
*hdev
;
1302 int err
= 0, do_inquiry
= 0, max_rsp
;
1306 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
1309 hdev
= hci_dev_get(ir
.dev_id
);
1313 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1318 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1323 if (hdev
->dev_type
!= HCI_BREDR
) {
1328 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1334 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
1335 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
1336 hci_inquiry_cache_flush(hdev
);
1339 hci_dev_unlock(hdev
);
1341 timeo
= ir
.length
* msecs_to_jiffies(2000);
1344 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
1349 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1350 * cleared). If it is interrupted by a signal, return -EINTR.
1352 if (wait_on_bit(&hdev
->flags
, HCI_INQUIRY
,
1353 TASK_INTERRUPTIBLE
))
1357 /* for unlimited number of responses we will use buffer with
1360 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
1362 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1363 * copy it to the user space.
1365 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
1372 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
1373 hci_dev_unlock(hdev
);
1375 BT_DBG("num_rsp %d", ir
.num_rsp
);
1377 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
1379 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
/* Power on the controller: run the transport open, driver setup, and HCI
 * init sequences, then announce HCI_DEV_UP. On any init failure, undo
 * everything (flush works/queues, close transport). Returns 0 or negative
 * errno. Serialized via hci_req_lock().
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_notify(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1542 /* ---- HCI ioctl helpers ---- */
/* Legacy ioctl entry point to power on device @dev. Takes and releases a
 * device reference; returns 0 or negative errno from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1599 /* This function requires the caller holds hdev->lock */
/* Drop all pending LE connection actions: release any held connection
 * references and unlink each params entry from its action list.
 * This function requires the caller holds hdev->lock.
 */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
/* Power down the controller: run the vendor shutdown (when applicable),
 * cancel all delayed works, flush queues and works in a lockdep-safe
 * order, optionally issue HCI Reset, close the transport, and clear all
 * volatile state. Always returns 0.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
/* Legacy ioctl entry point to power off device @dev. Rejects devices in
 * user-channel mode. Returns 0 or negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
/* Soft-reset the controller state: purge queues, flush caches and
 * connections, zero the flow-control counters and issue an HCI Reset.
 * Serialized via hci_req_lock(). Returns the reset request result.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}
/* HCIDEVRESET ioctl handler: validate device state, then delegate to
 * hci_dev_do_reset(). Returns 0 or negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
/* HCIDEVRESTAT ioctl handler: zero the device's byte/packet statistics.
 * Returns 0 or negative errno.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
/* Sync the CONNECTABLE/DISCOVERABLE mgmt flags with a scan-enable value
 * set through the legacy HCISETSCAN path, and notify mgmt listeners when
 * anything actually changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
/* Dispatcher for the legacy HCISET* ioctls (auth, encrypt, scan, link
 * policy/mode, packet type, ACL/SCO MTU). Only valid for configured
 * BR/EDR controllers not claimed by a user channel. Returns 0 or
 * negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs pkts in the low half and MTU in the high half */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
/* HCIGETDEVLIST ioctl handler: fill a user-supplied hci_dev_list_req with
 * the id and flags of each registered device (capped at the requested
 * count). Returns 0 or negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Sanity-bound the request so the kernel allocation stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
/* HCIGETDEVINFO ioctl handler: copy a snapshot of the device's name,
 * address, type, flags, MTUs and statistics to user space. Returns 0 or
 * negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controllers report their LE buffer info in the
		 * ACL fields; there is no SCO transport.
		 */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2097 /* ---- Interface to HCI drivers ---- */
/* rfkill set_block callback: track the RFKILLED flag and power the device
 * down when it gets blocked (unless it is still in setup/config, where
 * the open path performs the check itself). User-channel devices refuse
 * the request with -EBUSY.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
/* rfkill operations for HCI devices; only blocking is supported */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Work callback for powering a controller on (queued from mgmt or during
 * setup). Opens the device, re-checks deferred error conditions, arms the
 * auto-off timer and emits the appropriate mgmt index events.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigued Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2185 static void hci_power_off(struct work_struct
*work
)
2187 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2190 BT_DBG("%s", hdev
->name
);
2192 hci_dev_do_close(hdev
);
/* Work callback run after a controller hardware error: give the driver's
 * hw_error hook first shot (or just log the error code), then bounce the
 * device with a close/open cycle to recover.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2213 static void hci_discov_off(struct work_struct
*work
)
2215 struct hci_dev
*hdev
;
2217 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
2219 BT_DBG("%s", hdev
->name
);
2221 mgmt_discoverable_timeout(hdev
);
2224 static void hci_adv_timeout_expire(struct work_struct
*work
)
2226 struct hci_dev
*hdev
;
2228 hdev
= container_of(work
, struct hci_dev
, adv_instance_expire
.work
);
2230 BT_DBG("%s", hdev
->name
);
2232 mgmt_adv_timeout_expired(hdev
);
2235 void hci_uuids_clear(struct hci_dev
*hdev
)
2237 struct bt_uuid
*uuid
, *tmp
;
2239 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
2240 list_del(&uuid
->list
);
/* Remove and free all stored BR/EDR link keys. Entries are unlinked with
 * list_del_rcu() and freed after a grace period via kfree_rcu(), so
 * concurrent RCU readers stay safe.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
/* Remove and free all stored SMP Long Term Keys (RCU-deferred free). */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
/* Remove and free all stored Identity Resolving Keys (RCU-deferred free). */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
/* Look up the stored BR/EDR link key for @bdaddr under the RCU read lock.
 * Returns the key or NULL if none is stored.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and both sides' authentication requirements.
 * Returns true to keep the key, false to treat it as session-only.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2331 static u8
ltk_role(u8 type
)
2333 if (type
== SMP_LTK
)
2334 return HCI_ROLE_MASTER
;
2336 return HCI_ROLE_SLAVE
;
/* Look up a Long Term Key matching address, address type and role under
 * the RCU read lock. Secure Connections keys match either role. Returns
 * the key or NULL.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
/* Resolve a Resolvable Private Address to its IRK: first try a cached
 * exact RPA match, then fall back to cryptographically resolving the RPA
 * against every stored IRK (caching the result on success). Returns the
 * IRK or NULL.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above */
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
/* Look up the IRK stored for an identity address. Rejects random addresses
 * that are not static random (top two bits must be 0b11). Returns the IRK
 * or NULL.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
/* Store (or update) the BR/EDR link key for @bdaddr. When @persistent is
 * non-NULL it is set to whether the key should survive a power cycle
 * (see hci_persistent_key()). Returns the stored key or NULL on OOM.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type on record */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
/* Store (or update) a Long Term Key for @bdaddr/@addr_type. The role is
 * derived from @type via ltk_role(). Returns the stored key or NULL on
 * OOM.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
/* Store (or update) the Identity Resolving Key for @bdaddr/@addr_type,
 * recording the currently associated RPA in @rpa. Returns the stored IRK
 * or NULL on OOM.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
/* Delete the stored BR/EDR link key for @bdaddr. Returns 0 on success or
 * -ENOENT when no key is stored. The key memory is freed after an RCU
 * grace period.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
/* Delete all Long Term Keys stored for @bdaddr/@bdaddr_type (both roles).
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
/* Delete all Identity Resolving Keys stored for @bdaddr/@addr_type.
 * Silent no-op when nothing matches.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
/* Return true if any pairing key exists for the given address: a link key
 * for BR/EDR, or an LTK for LE (after resolving the address through a
 * stored IRK to its identity address when one exists).
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2590 /* HCI command timer function */
/* HCI command timer function: fires when the controller failed to answer
 * the last sent command in time. Logs the timed-out opcode (when known),
 * then resets the command credit so the command work queue can make
 * progress again.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2609 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
2610 bdaddr_t
*bdaddr
, u8 bdaddr_type
)
2612 struct oob_data
*data
;
2614 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
) {
2615 if (bacmp(bdaddr
, &data
->bdaddr
) != 0)
2617 if (data
->bdaddr_type
!= bdaddr_type
)
/* Delete the stored remote OOB data for @bdaddr/@bdaddr_type. Returns 0
 * on success or -ENOENT when no entry exists.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}
2642 void hci_remote_oob_data_clear(struct hci_dev
*hdev
)
2644 struct oob_data
*data
, *n
;
2646 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
2647 list_del(&data
->list
);
/* Store (or update) remote OOB pairing data for @bdaddr. The P-192 and
 * P-256 hash/randomizer pairs are each optional; data->present encodes
 * which are set (0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both,
 * 0x00 = neither). Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2698 /* This function requires the caller holds hdev->lock */
2699 struct adv_info
*hci_find_adv_instance(struct hci_dev
*hdev
, u8 instance
)
2701 struct adv_info
*adv_instance
;
2703 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
2704 if (adv_instance
->instance
== instance
)
2705 return adv_instance
;
2711 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance following @instance, wrapping from the
 * last entry back to the first; NULL if @instance does not exist.
 * This function requires the caller holds hdev->lock.
 */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
						 struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
2727 /* This function requires the caller holds hdev->lock */
2728 int hci_remove_adv_instance(struct hci_dev
*hdev
, u8 instance
)
2730 struct adv_info
*adv_instance
;
2732 adv_instance
= hci_find_adv_instance(hdev
, instance
);
2736 BT_DBG("%s removing %dMR", hdev
->name
, instance
);
2738 if (hdev
->cur_adv_instance
== instance
&& hdev
->adv_instance_timeout
) {
2739 cancel_delayed_work(&hdev
->adv_instance_expire
);
2740 hdev
->adv_instance_timeout
= 0;
2743 list_del(&adv_instance
->list
);
2744 kfree(adv_instance
);
2746 hdev
->adv_instance_cnt
--;
2751 /* This function requires the caller holds hdev->lock */
/* Remove and free every advertising instance, cancelling any pending
 * expiry timer first. This function requires the caller holds hdev->lock.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}
2769 /* This function requires the caller holds hdev->lock */
2770 int hci_add_adv_instance(struct hci_dev
*hdev
, u8 instance
, u32 flags
,
2771 u16 adv_data_len
, u8
*adv_data
,
2772 u16 scan_rsp_len
, u8
*scan_rsp_data
,
2773 u16 timeout
, u16 duration
)
2775 struct adv_info
*adv_instance
;
2777 adv_instance
= hci_find_adv_instance(hdev
, instance
);
2779 memset(adv_instance
->adv_data
, 0,
2780 sizeof(adv_instance
->adv_data
));
2781 memset(adv_instance
->scan_rsp_data
, 0,
2782 sizeof(adv_instance
->scan_rsp_data
));
2784 if (hdev
->adv_instance_cnt
>= HCI_MAX_ADV_INSTANCES
||
2785 instance
< 1 || instance
> HCI_MAX_ADV_INSTANCES
)
2788 adv_instance
= kzalloc(sizeof(*adv_instance
), GFP_KERNEL
);
2792 adv_instance
->pending
= true;
2793 adv_instance
->instance
= instance
;
2794 list_add(&adv_instance
->list
, &hdev
->adv_instances
);
2795 hdev
->adv_instance_cnt
++;
2798 adv_instance
->flags
= flags
;
2799 adv_instance
->adv_data_len
= adv_data_len
;
2800 adv_instance
->scan_rsp_len
= scan_rsp_len
;
2803 memcpy(adv_instance
->adv_data
, adv_data
, adv_data_len
);
2806 memcpy(adv_instance
->scan_rsp_data
,
2807 scan_rsp_data
, scan_rsp_len
);
2809 adv_instance
->timeout
= timeout
;
2810 adv_instance
->remaining_time
= timeout
;
2813 adv_instance
->duration
= HCI_DEFAULT_ADV_DURATION
;
2815 adv_instance
->duration
= duration
;
2817 BT_DBG("%s for %dMR", hdev
->name
, instance
);
2822 struct bdaddr_list
*hci_bdaddr_list_lookup(struct list_head
*bdaddr_list
,
2823 bdaddr_t
*bdaddr
, u8 type
)
2825 struct bdaddr_list
*b
;
2827 list_for_each_entry(b
, bdaddr_list
, list
) {
2828 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
2835 void hci_bdaddr_list_clear(struct list_head
*bdaddr_list
)
2837 struct list_head
*p
, *n
;
2839 list_for_each_safe(p
, n
, bdaddr_list
) {
2840 struct bdaddr_list
*b
= list_entry(p
, struct bdaddr_list
, list
);
2847 int hci_bdaddr_list_add(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2849 struct bdaddr_list
*entry
;
2851 if (!bacmp(bdaddr
, BDADDR_ANY
))
2854 if (hci_bdaddr_list_lookup(list
, bdaddr
, type
))
2857 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
2861 bacpy(&entry
->bdaddr
, bdaddr
);
2862 entry
->bdaddr_type
= type
;
2864 list_add(&entry
->list
, list
);
2869 int hci_bdaddr_list_del(struct list_head
*list
, bdaddr_t
*bdaddr
, u8 type
)
2871 struct bdaddr_list
*entry
;
2873 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
2874 hci_bdaddr_list_clear(list
);
2878 entry
= hci_bdaddr_list_lookup(list
, bdaddr
, type
);
2882 list_del(&entry
->list
);
2888 /* This function requires the caller holds hdev->lock */
2889 struct hci_conn_params
*hci_conn_params_lookup(struct hci_dev
*hdev
,
2890 bdaddr_t
*addr
, u8 addr_type
)
2892 struct hci_conn_params
*params
;
2894 list_for_each_entry(params
, &hdev
->le_conn_params
, list
) {
2895 if (bacmp(¶ms
->addr
, addr
) == 0 &&
2896 params
->addr_type
== addr_type
) {
2904 /* This function requires the caller holds hdev->lock */
2905 struct hci_conn_params
*hci_pend_le_action_lookup(struct list_head
*list
,
2906 bdaddr_t
*addr
, u8 addr_type
)
2908 struct hci_conn_params
*param
;
2910 list_for_each_entry(param
, list
, action
) {
2911 if (bacmp(¶m
->addr
, addr
) == 0 &&
2912 param
->addr_type
== addr_type
)
2919 /* This function requires the caller holds hdev->lock */
2920 struct hci_conn_params
*hci_explicit_connect_lookup(struct hci_dev
*hdev
,
2924 struct hci_conn_params
*param
;
2926 list_for_each_entry(param
, &hdev
->pend_le_conns
, action
) {
2927 if (bacmp(¶m
->addr
, addr
) == 0 &&
2928 param
->addr_type
== addr_type
&&
2929 param
->explicit_connect
)
2933 list_for_each_entry(param
, &hdev
->pend_le_reports
, action
) {
2934 if (bacmp(¶m
->addr
, addr
) == 0 &&
2935 param
->addr_type
== addr_type
&&
2936 param
->explicit_connect
)
2943 /* This function requires the caller holds hdev->lock */
2944 struct hci_conn_params
*hci_conn_params_add(struct hci_dev
*hdev
,
2945 bdaddr_t
*addr
, u8 addr_type
)
2947 struct hci_conn_params
*params
;
2949 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
2953 params
= kzalloc(sizeof(*params
), GFP_KERNEL
);
2955 BT_ERR("Out of memory");
2959 bacpy(¶ms
->addr
, addr
);
2960 params
->addr_type
= addr_type
;
2962 list_add(¶ms
->list
, &hdev
->le_conn_params
);
2963 INIT_LIST_HEAD(¶ms
->action
);
2965 params
->conn_min_interval
= hdev
->le_conn_min_interval
;
2966 params
->conn_max_interval
= hdev
->le_conn_max_interval
;
2967 params
->conn_latency
= hdev
->le_conn_latency
;
2968 params
->supervision_timeout
= hdev
->le_supv_timeout
;
2969 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2971 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
2976 static void hci_conn_params_free(struct hci_conn_params
*params
)
2979 hci_conn_drop(params
->conn
);
2980 hci_conn_put(params
->conn
);
2983 list_del(¶ms
->action
);
2984 list_del(¶ms
->list
);
2988 /* This function requires the caller holds hdev->lock */
2989 void hci_conn_params_del(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 addr_type
)
2991 struct hci_conn_params
*params
;
2993 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
2997 hci_conn_params_free(params
);
2999 hci_update_background_scan(hdev
);
3001 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
3004 /* This function requires the caller holds hdev->lock */
3005 void hci_conn_params_clear_disabled(struct hci_dev
*hdev
)
3007 struct hci_conn_params
*params
, *tmp
;
3009 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
) {
3010 if (params
->auto_connect
!= HCI_AUTO_CONN_DISABLED
)
3013 /* If trying to estabilish one time connection to disabled
3014 * device, leave the params, but mark them as just once.
3016 if (params
->explicit_connect
) {
3017 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
3021 list_del(¶ms
->list
);
3025 BT_DBG("All LE disabled connection parameters were removed");
3028 /* This function requires the caller holds hdev->lock */
3029 void hci_conn_params_clear_all(struct hci_dev
*hdev
)
3031 struct hci_conn_params
*params
, *tmp
;
3033 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
)
3034 hci_conn_params_free(params
);
3036 hci_update_background_scan(hdev
);
3038 BT_DBG("All LE connection parameters were removed");
3041 static void inquiry_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3044 BT_ERR("Failed to start inquiry: status %d", status
);
3047 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3048 hci_dev_unlock(hdev
);
3053 static void le_scan_disable_work_complete(struct hci_dev
*hdev
, u8 status
,
3056 /* General inquiry access code (GIAC) */
3057 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3058 struct hci_cp_inquiry cp
;
3062 BT_ERR("Failed to disable LE scanning: status %d", status
);
3066 hdev
->discovery
.scan_start
= 0;
3068 switch (hdev
->discovery
.type
) {
3069 case DISCOV_TYPE_LE
:
3071 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3072 hci_dev_unlock(hdev
);
3075 case DISCOV_TYPE_INTERLEAVED
:
3078 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY
,
3080 /* If we were running LE only scan, change discovery
3081 * state. If we were running both LE and BR/EDR inquiry
3082 * simultaneously, and BR/EDR inquiry is already
3083 * finished, stop discovery, otherwise BR/EDR inquiry
3084 * will stop discovery when finished. If we will resolve
3085 * remote device name, do not change discovery state.
3087 if (!test_bit(HCI_INQUIRY
, &hdev
->flags
) &&
3088 hdev
->discovery
.state
!= DISCOVERY_RESOLVING
)
3089 hci_discovery_set_state(hdev
,
3092 struct hci_request req
;
3094 hci_inquiry_cache_flush(hdev
);
3096 hci_req_init(&req
, hdev
);
3098 memset(&cp
, 0, sizeof(cp
));
3099 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
3100 cp
.length
= DISCOV_INTERLEAVED_INQUIRY_LEN
;
3101 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
3103 err
= hci_req_run(&req
, inquiry_complete
);
3105 BT_ERR("Inquiry request failed: err %d", err
);
3106 hci_discovery_set_state(hdev
,
3111 hci_dev_unlock(hdev
);
3116 static void le_scan_disable_work(struct work_struct
*work
)
3118 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3119 le_scan_disable
.work
);
3120 struct hci_request req
;
3123 BT_DBG("%s", hdev
->name
);
3125 cancel_delayed_work_sync(&hdev
->le_scan_restart
);
3127 hci_req_init(&req
, hdev
);
3129 hci_req_add_le_scan_disable(&req
);
3131 err
= hci_req_run(&req
, le_scan_disable_work_complete
);
3133 BT_ERR("Disable LE scanning request failed: err %d", err
);
3136 static void le_scan_restart_work_complete(struct hci_dev
*hdev
, u8 status
,
3139 unsigned long timeout
, duration
, scan_start
, now
;
3141 BT_DBG("%s", hdev
->name
);
3144 BT_ERR("Failed to restart LE scan: status %d", status
);
3148 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
) ||
3149 !hdev
->discovery
.scan_start
)
3152 /* When the scan was started, hdev->le_scan_disable has been queued
3153 * after duration from scan_start. During scan restart this job
3154 * has been canceled, and we need to queue it again after proper
3155 * timeout, to make sure that scan does not run indefinitely.
3157 duration
= hdev
->discovery
.scan_duration
;
3158 scan_start
= hdev
->discovery
.scan_start
;
3160 if (now
- scan_start
<= duration
) {
3163 if (now
>= scan_start
)
3164 elapsed
= now
- scan_start
;
3166 elapsed
= ULONG_MAX
- scan_start
+ now
;
3168 timeout
= duration
- elapsed
;
3172 queue_delayed_work(hdev
->workqueue
,
3173 &hdev
->le_scan_disable
, timeout
);
3176 static void le_scan_restart_work(struct work_struct
*work
)
3178 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3179 le_scan_restart
.work
);
3180 struct hci_request req
;
3181 struct hci_cp_le_set_scan_enable cp
;
3184 BT_DBG("%s", hdev
->name
);
3186 /* If controller is not scanning we are done. */
3187 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
3190 hci_req_init(&req
, hdev
);
3192 hci_req_add_le_scan_disable(&req
);
3194 memset(&cp
, 0, sizeof(cp
));
3195 cp
.enable
= LE_SCAN_ENABLE
;
3196 cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3197 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
3199 err
= hci_req_run(&req
, le_scan_restart_work_complete
);
3201 BT_ERR("Restart LE scan request failed: err %d", err
);
3204 /* Copy the Identity Address of the controller.
3206 * If the controller has a public BD_ADDR, then by default use that one.
3207 * If this is a LE only controller without a public address, default to
3208 * the static random address.
3210 * For debugging purposes it is possible to force controllers with a
3211 * public address to use the static random address instead.
3213 * In case BR/EDR has been disabled on a dual-mode controller and
3214 * userspace has configured a static address, then that address
3215 * becomes the identity address instead of the public BR/EDR address.
3217 void hci_copy_identity_address(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3220 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
3221 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
3222 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
3223 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
3224 bacpy(bdaddr
, &hdev
->static_addr
);
3225 *bdaddr_type
= ADDR_LE_DEV_RANDOM
;
3227 bacpy(bdaddr
, &hdev
->bdaddr
);
3228 *bdaddr_type
= ADDR_LE_DEV_PUBLIC
;
3232 /* Alloc HCI device */
3233 struct hci_dev
*hci_alloc_dev(void)
3235 struct hci_dev
*hdev
;
3237 hdev
= kzalloc(sizeof(*hdev
), GFP_KERNEL
);
3241 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
3242 hdev
->esco_type
= (ESCO_HV1
);
3243 hdev
->link_mode
= (HCI_LM_ACCEPT
);
3244 hdev
->num_iac
= 0x01; /* One IAC support is mandatory */
3245 hdev
->io_capability
= 0x03; /* No Input No Output */
3246 hdev
->manufacturer
= 0xffff; /* Default to internal use */
3247 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
3248 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
3249 hdev
->adv_instance_cnt
= 0;
3250 hdev
->cur_adv_instance
= 0x00;
3251 hdev
->adv_instance_timeout
= 0;
3253 hdev
->sniff_max_interval
= 800;
3254 hdev
->sniff_min_interval
= 80;
3256 hdev
->le_adv_channel_map
= 0x07;
3257 hdev
->le_adv_min_interval
= 0x0800;
3258 hdev
->le_adv_max_interval
= 0x0800;
3259 hdev
->le_scan_interval
= 0x0060;
3260 hdev
->le_scan_window
= 0x0030;
3261 hdev
->le_conn_min_interval
= 0x0028;
3262 hdev
->le_conn_max_interval
= 0x0038;
3263 hdev
->le_conn_latency
= 0x0000;
3264 hdev
->le_supv_timeout
= 0x002a;
3265 hdev
->le_def_tx_len
= 0x001b;
3266 hdev
->le_def_tx_time
= 0x0148;
3267 hdev
->le_max_tx_len
= 0x001b;
3268 hdev
->le_max_tx_time
= 0x0148;
3269 hdev
->le_max_rx_len
= 0x001b;
3270 hdev
->le_max_rx_time
= 0x0148;
3272 hdev
->rpa_timeout
= HCI_DEFAULT_RPA_TIMEOUT
;
3273 hdev
->discov_interleaved_timeout
= DISCOV_INTERLEAVED_TIMEOUT
;
3274 hdev
->conn_info_min_age
= DEFAULT_CONN_INFO_MIN_AGE
;
3275 hdev
->conn_info_max_age
= DEFAULT_CONN_INFO_MAX_AGE
;
3277 mutex_init(&hdev
->lock
);
3278 mutex_init(&hdev
->req_lock
);
3280 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
3281 INIT_LIST_HEAD(&hdev
->blacklist
);
3282 INIT_LIST_HEAD(&hdev
->whitelist
);
3283 INIT_LIST_HEAD(&hdev
->uuids
);
3284 INIT_LIST_HEAD(&hdev
->link_keys
);
3285 INIT_LIST_HEAD(&hdev
->long_term_keys
);
3286 INIT_LIST_HEAD(&hdev
->identity_resolving_keys
);
3287 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
3288 INIT_LIST_HEAD(&hdev
->le_white_list
);
3289 INIT_LIST_HEAD(&hdev
->le_conn_params
);
3290 INIT_LIST_HEAD(&hdev
->pend_le_conns
);
3291 INIT_LIST_HEAD(&hdev
->pend_le_reports
);
3292 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
3293 INIT_LIST_HEAD(&hdev
->adv_instances
);
3295 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
3296 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
3297 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
3298 INIT_WORK(&hdev
->power_on
, hci_power_on
);
3299 INIT_WORK(&hdev
->error_reset
, hci_error_reset
);
3301 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
3302 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
3303 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
3304 INIT_DELAYED_WORK(&hdev
->le_scan_restart
, le_scan_restart_work
);
3305 INIT_DELAYED_WORK(&hdev
->adv_instance_expire
, hci_adv_timeout_expire
);
3307 skb_queue_head_init(&hdev
->rx_q
);
3308 skb_queue_head_init(&hdev
->cmd_q
);
3309 skb_queue_head_init(&hdev
->raw_q
);
3311 init_waitqueue_head(&hdev
->req_wait_q
);
3313 INIT_DELAYED_WORK(&hdev
->cmd_timer
, hci_cmd_timeout
);
3315 hci_init_sysfs(hdev
);
3316 discovery_init(hdev
);
3320 EXPORT_SYMBOL(hci_alloc_dev
);
3322 /* Free HCI device */
3323 void hci_free_dev(struct hci_dev
*hdev
)
3325 /* will free via device release */
3326 put_device(&hdev
->dev
);
3328 EXPORT_SYMBOL(hci_free_dev
);
3330 /* Register HCI device */
3331 int hci_register_dev(struct hci_dev
*hdev
)
3335 if (!hdev
->open
|| !hdev
->close
|| !hdev
->send
)
3338 /* Do not allow HCI_AMP devices to register at index 0,
3339 * so the index can be used as the AMP controller ID.
3341 switch (hdev
->dev_type
) {
3343 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
3346 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
3355 sprintf(hdev
->name
, "hci%d", id
);
3358 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3360 hdev
->workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3361 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3362 if (!hdev
->workqueue
) {
3367 hdev
->req_workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3368 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3369 if (!hdev
->req_workqueue
) {
3370 destroy_workqueue(hdev
->workqueue
);
3375 if (!IS_ERR_OR_NULL(bt_debugfs
))
3376 hdev
->debugfs
= debugfs_create_dir(hdev
->name
, bt_debugfs
);
3378 dev_set_name(&hdev
->dev
, "%s", hdev
->name
);
3380 error
= device_add(&hdev
->dev
);
3384 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
3385 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
3388 if (rfkill_register(hdev
->rfkill
) < 0) {
3389 rfkill_destroy(hdev
->rfkill
);
3390 hdev
->rfkill
= NULL
;
3394 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
))
3395 hci_dev_set_flag(hdev
, HCI_RFKILLED
);
3397 hci_dev_set_flag(hdev
, HCI_SETUP
);
3398 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
3400 if (hdev
->dev_type
== HCI_BREDR
) {
3401 /* Assume BR/EDR support until proven otherwise (such as
3402 * through reading supported features during init.
3404 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
3407 write_lock(&hci_dev_list_lock
);
3408 list_add(&hdev
->list
, &hci_dev_list
);
3409 write_unlock(&hci_dev_list_lock
);
3411 /* Devices that are marked for raw-only usage are unconfigured
3412 * and should not be included in normal operation.
3414 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
3415 hci_dev_set_flag(hdev
, HCI_UNCONFIGURED
);
3417 hci_notify(hdev
, HCI_DEV_REG
);
3420 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
3425 destroy_workqueue(hdev
->workqueue
);
3426 destroy_workqueue(hdev
->req_workqueue
);
3428 ida_simple_remove(&hci_index_ida
, hdev
->id
);
3432 EXPORT_SYMBOL(hci_register_dev
);
3434 /* Unregister HCI device */
3435 void hci_unregister_dev(struct hci_dev
*hdev
)
3439 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3441 hci_dev_set_flag(hdev
, HCI_UNREGISTER
);
3445 write_lock(&hci_dev_list_lock
);
3446 list_del(&hdev
->list
);
3447 write_unlock(&hci_dev_list_lock
);
3449 hci_dev_do_close(hdev
);
3451 cancel_work_sync(&hdev
->power_on
);
3453 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
3454 !hci_dev_test_flag(hdev
, HCI_SETUP
) &&
3455 !hci_dev_test_flag(hdev
, HCI_CONFIG
)) {
3457 mgmt_index_removed(hdev
);
3458 hci_dev_unlock(hdev
);
3461 /* mgmt_index_removed should take care of emptying the
3463 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
3465 hci_notify(hdev
, HCI_DEV_UNREG
);
3468 rfkill_unregister(hdev
->rfkill
);
3469 rfkill_destroy(hdev
->rfkill
);
3472 device_del(&hdev
->dev
);
3474 debugfs_remove_recursive(hdev
->debugfs
);
3476 destroy_workqueue(hdev
->workqueue
);
3477 destroy_workqueue(hdev
->req_workqueue
);
3480 hci_bdaddr_list_clear(&hdev
->blacklist
);
3481 hci_bdaddr_list_clear(&hdev
->whitelist
);
3482 hci_uuids_clear(hdev
);
3483 hci_link_keys_clear(hdev
);
3484 hci_smp_ltks_clear(hdev
);
3485 hci_smp_irks_clear(hdev
);
3486 hci_remote_oob_data_clear(hdev
);
3487 hci_adv_instances_clear(hdev
);
3488 hci_bdaddr_list_clear(&hdev
->le_white_list
);
3489 hci_conn_params_clear_all(hdev
);
3490 hci_discovery_filter_clear(hdev
);
3491 hci_dev_unlock(hdev
);
3495 ida_simple_remove(&hci_index_ida
, id
);
3497 EXPORT_SYMBOL(hci_unregister_dev
);
3499 /* Suspend HCI device */
3500 int hci_suspend_dev(struct hci_dev
*hdev
)
3502 hci_notify(hdev
, HCI_DEV_SUSPEND
);
3505 EXPORT_SYMBOL(hci_suspend_dev
);
3507 /* Resume HCI device */
3508 int hci_resume_dev(struct hci_dev
*hdev
)
3510 hci_notify(hdev
, HCI_DEV_RESUME
);
3513 EXPORT_SYMBOL(hci_resume_dev
);
3515 /* Reset HCI device */
3516 int hci_reset_dev(struct hci_dev
*hdev
)
3518 const u8 hw_err
[] = { HCI_EV_HARDWARE_ERROR
, 0x01, 0x00 };
3519 struct sk_buff
*skb
;
3521 skb
= bt_skb_alloc(3, GFP_ATOMIC
);
3525 bt_cb(skb
)->pkt_type
= HCI_EVENT_PKT
;
3526 memcpy(skb_put(skb
, 3), hw_err
, 3);
3528 /* Send Hardware Error to upper stack */
3529 return hci_recv_frame(hdev
, skb
);
3531 EXPORT_SYMBOL(hci_reset_dev
);
3533 /* Receive frame from HCI drivers */
3534 int hci_recv_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3536 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
3537 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
3542 if (bt_cb(skb
)->pkt_type
!= HCI_EVENT_PKT
&&
3543 bt_cb(skb
)->pkt_type
!= HCI_ACLDATA_PKT
&&
3544 bt_cb(skb
)->pkt_type
!= HCI_SCODATA_PKT
) {
3550 bt_cb(skb
)->incoming
= 1;
3553 __net_timestamp(skb
);
3555 skb_queue_tail(&hdev
->rx_q
, skb
);
3556 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
3560 EXPORT_SYMBOL(hci_recv_frame
);
3562 /* Receive diagnostic message from HCI drivers */
3563 int hci_recv_diag(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3566 __net_timestamp(skb
);
3568 /* Mark as diagnostic packet and send to monitor */
3569 bt_cb(skb
)->pkt_type
= HCI_DIAG_PKT
;
3570 hci_send_to_monitor(hdev
, skb
);
3575 EXPORT_SYMBOL(hci_recv_diag
);
3577 /* ---- Interface to upper protocols ---- */
3579 int hci_register_cb(struct hci_cb
*cb
)
3581 BT_DBG("%p name %s", cb
, cb
->name
);
3583 mutex_lock(&hci_cb_list_lock
);
3584 list_add_tail(&cb
->list
, &hci_cb_list
);
3585 mutex_unlock(&hci_cb_list_lock
);
3589 EXPORT_SYMBOL(hci_register_cb
);
3591 int hci_unregister_cb(struct hci_cb
*cb
)
3593 BT_DBG("%p name %s", cb
, cb
->name
);
3595 mutex_lock(&hci_cb_list_lock
);
3596 list_del(&cb
->list
);
3597 mutex_unlock(&hci_cb_list_lock
);
3601 EXPORT_SYMBOL(hci_unregister_cb
);
3603 static void hci_send_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3607 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
3610 __net_timestamp(skb
);
3612 /* Send copy to monitor */
3613 hci_send_to_monitor(hdev
, skb
);
3615 if (atomic_read(&hdev
->promisc
)) {
3616 /* Send copy to the sockets */
3617 hci_send_to_sock(hdev
, skb
);
3620 /* Get rid of skb owner, prior to sending to the driver. */
3623 if (!test_bit(HCI_RUNNING
, &hdev
->flags
)) {
3628 err
= hdev
->send(hdev
, skb
);
3630 BT_ERR("%s sending frame failed (%d)", hdev
->name
, err
);
3635 /* Send HCI command */
3636 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
,
3639 struct sk_buff
*skb
;
3641 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
3643 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
3645 BT_ERR("%s no memory for command", hdev
->name
);
3649 /* Stand-alone HCI commands must be flagged as
3650 * single-command requests.
3652 bt_cb(skb
)->req
.start
= true;
3654 skb_queue_tail(&hdev
->cmd_q
, skb
);
3655 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3660 /* Get data from the previously sent command */
3661 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
3663 struct hci_command_hdr
*hdr
;
3665 if (!hdev
->sent_cmd
)
3668 hdr
= (void *) hdev
->sent_cmd
->data
;
3670 if (hdr
->opcode
!= cpu_to_le16(opcode
))
3673 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
3675 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
3678 /* Send HCI command and wait for command commplete event */
3679 struct sk_buff
*hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
3680 const void *param
, u32 timeout
)
3682 struct sk_buff
*skb
;
3684 if (!test_bit(HCI_UP
, &hdev
->flags
))
3685 return ERR_PTR(-ENETDOWN
);
3687 bt_dev_dbg(hdev
, "opcode 0x%4.4x plen %d", opcode
, plen
);
3690 skb
= __hci_cmd_sync(hdev
, opcode
, plen
, param
, timeout
);
3691 hci_req_unlock(hdev
);
3695 EXPORT_SYMBOL(hci_cmd_sync
);
3698 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
3700 struct hci_acl_hdr
*hdr
;
3703 skb_push(skb
, HCI_ACL_HDR_SIZE
);
3704 skb_reset_transport_header(skb
);
3705 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
3706 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
3707 hdr
->dlen
= cpu_to_le16(len
);
3710 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
3711 struct sk_buff
*skb
, __u16 flags
)
3713 struct hci_conn
*conn
= chan
->conn
;
3714 struct hci_dev
*hdev
= conn
->hdev
;
3715 struct sk_buff
*list
;
3717 skb
->len
= skb_headlen(skb
);
3720 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
3722 switch (hdev
->dev_type
) {
3724 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3727 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
3730 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
3734 list
= skb_shinfo(skb
)->frag_list
;
3736 /* Non fragmented */
3737 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
3739 skb_queue_tail(queue
, skb
);
3742 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3744 skb_shinfo(skb
)->frag_list
= NULL
;
3746 /* Queue all fragments atomically. We need to use spin_lock_bh
3747 * here because of 6LoWPAN links, as there this function is
3748 * called from softirq and using normal spin lock could cause
3751 spin_lock_bh(&queue
->lock
);
3753 __skb_queue_tail(queue
, skb
);
3755 flags
&= ~ACL_START
;
3758 skb
= list
; list
= list
->next
;
3760 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
3761 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
3763 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
3765 __skb_queue_tail(queue
, skb
);
3768 spin_unlock_bh(&queue
->lock
);
3772 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
3774 struct hci_dev
*hdev
= chan
->conn
->hdev
;
3776 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
3778 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
3780 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3784 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
3786 struct hci_dev
*hdev
= conn
->hdev
;
3787 struct hci_sco_hdr hdr
;
3789 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
3791 hdr
.handle
= cpu_to_le16(conn
->handle
);
3792 hdr
.dlen
= skb
->len
;
3794 skb_push(skb
, HCI_SCO_HDR_SIZE
);
3795 skb_reset_transport_header(skb
);
3796 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
3798 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
3800 skb_queue_tail(&conn
->data_q
, skb
);
3801 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
3804 /* ---- HCI TX task (outgoing data) ---- */
3806 /* HCI Connection scheduler */
3807 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
3810 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3811 struct hci_conn
*conn
= NULL
, *c
;
3812 unsigned int num
= 0, min
= ~0;
3814 /* We don't have to lock device here. Connections are always
3815 * added and removed with TX task disabled. */
3819 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3820 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
3823 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
3828 if (c
->sent
< min
) {
3833 if (hci_conn_num(hdev
, type
) == num
)
3842 switch (conn
->type
) {
3844 cnt
= hdev
->acl_cnt
;
3848 cnt
= hdev
->sco_cnt
;
3851 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3855 BT_ERR("Unknown link type");
3863 BT_DBG("conn %p quote %d", conn
, *quote
);
3867 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
3869 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3872 BT_ERR("%s link tx timeout", hdev
->name
);
3876 /* Kill stalled connections */
3877 list_for_each_entry_rcu(c
, &h
->list
, list
) {
3878 if (c
->type
== type
&& c
->sent
) {
3879 BT_ERR("%s killing stalled connection %pMR",
3880 hdev
->name
, &c
->dst
);
3881 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
3888 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
3891 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3892 struct hci_chan
*chan
= NULL
;
3893 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
3894 struct hci_conn
*conn
;
3895 int cnt
, q
, conn_num
= 0;
3897 BT_DBG("%s", hdev
->name
);
3901 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3902 struct hci_chan
*tmp
;
3904 if (conn
->type
!= type
)
3907 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3912 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
3913 struct sk_buff
*skb
;
3915 if (skb_queue_empty(&tmp
->data_q
))
3918 skb
= skb_peek(&tmp
->data_q
);
3919 if (skb
->priority
< cur_prio
)
3922 if (skb
->priority
> cur_prio
) {
3925 cur_prio
= skb
->priority
;
3930 if (conn
->sent
< min
) {
3936 if (hci_conn_num(hdev
, type
) == conn_num
)
3945 switch (chan
->conn
->type
) {
3947 cnt
= hdev
->acl_cnt
;
3950 cnt
= hdev
->block_cnt
;
3954 cnt
= hdev
->sco_cnt
;
3957 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3961 BT_ERR("Unknown link type");
3966 BT_DBG("chan %p quote %d", chan
, *quote
);
3970 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
3972 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3973 struct hci_conn
*conn
;
3976 BT_DBG("%s", hdev
->name
);
3980 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3981 struct hci_chan
*chan
;
3983 if (conn
->type
!= type
)
3986 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3991 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
3992 struct sk_buff
*skb
;
3999 if (skb_queue_empty(&chan
->data_q
))
4002 skb
= skb_peek(&chan
->data_q
);
4003 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
4006 skb
->priority
= HCI_PRIO_MAX
- 1;
4008 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
4012 if (hci_conn_num(hdev
, type
) == num
)
4020 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4022 /* Calculate count of blocks used by this packet */
4023 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
4026 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
4028 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
4029 /* ACL tx timeout must be longer than maximum
4030 * link supervision timeout (40.9 seconds) */
4031 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
4032 HCI_ACL_TX_TIMEOUT
))
4033 hci_link_tx_to(hdev
, ACL_LINK
);
4037 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
4039 unsigned int cnt
= hdev
->acl_cnt
;
4040 struct hci_chan
*chan
;
4041 struct sk_buff
*skb
;
4044 __check_timeout(hdev
, cnt
);
4046 while (hdev
->acl_cnt
&&
4047 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
4048 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
4049 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
4050 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
4051 skb
->len
, skb
->priority
);
4053 /* Stop if priority has changed */
4054 if (skb
->priority
< priority
)
4057 skb
= skb_dequeue(&chan
->data_q
);
4059 hci_conn_enter_active_mode(chan
->conn
,
4060 bt_cb(skb
)->force_active
);
4062 hci_send_frame(hdev
, skb
);
4063 hdev
->acl_last_tx
= jiffies
;
4071 if (cnt
!= hdev
->acl_cnt
)
4072 hci_prio_recalculate(hdev
, ACL_LINK
);
4075 static void hci_sched_acl_blk(struct hci_dev
*hdev
)
4077 unsigned int cnt
= hdev
->block_cnt
;
4078 struct hci_chan
*chan
;
4079 struct sk_buff
*skb
;
4083 __check_timeout(hdev
, cnt
);
4085 BT_DBG("%s", hdev
->name
);
4087 if (hdev
->dev_type
== HCI_AMP
)
4092 while (hdev
->block_cnt
> 0 &&
4093 (chan
= hci_chan_sent(hdev
, type
, "e
))) {
4094 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
4095 while (quote
> 0 && (skb
= skb_peek(&chan
->data_q
))) {
4098 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
4099 skb
->len
, skb
->priority
);
4101 /* Stop if priority has changed */
4102 if (skb
->priority
< priority
)
4105 skb
= skb_dequeue(&chan
->data_q
);
4107 blocks
= __get_blocks(hdev
, skb
);
4108 if (blocks
> hdev
->block_cnt
)
4111 hci_conn_enter_active_mode(chan
->conn
,
4112 bt_cb(skb
)->force_active
);
4114 hci_send_frame(hdev
, skb
);
4115 hdev
->acl_last_tx
= jiffies
;
4117 hdev
->block_cnt
-= blocks
;
4120 chan
->sent
+= blocks
;
4121 chan
->conn
->sent
+= blocks
;
4125 if (cnt
!= hdev
->block_cnt
)
4126 hci_prio_recalculate(hdev
, type
);
4129 static void hci_sched_acl(struct hci_dev
*hdev
)
4131 BT_DBG("%s", hdev
->name
);
4133 /* No ACL link over BR/EDR controller */
4134 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
4137 /* No AMP link over AMP controller */
4138 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
4141 switch (hdev
->flow_ctl_mode
) {
4142 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
4143 hci_sched_acl_pkt(hdev
);
4146 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
4147 hci_sched_acl_blk(hdev
);
4153 static void hci_sched_sco(struct hci_dev
*hdev
)
4155 struct hci_conn
*conn
;
4156 struct sk_buff
*skb
;
4159 BT_DBG("%s", hdev
->name
);
4161 if (!hci_conn_num(hdev
, SCO_LINK
))
4164 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
4165 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
4166 BT_DBG("skb %p len %d", skb
, skb
->len
);
4167 hci_send_frame(hdev
, skb
);
4170 if (conn
->sent
== ~0)
4176 static void hci_sched_esco(struct hci_dev
*hdev
)
4178 struct hci_conn
*conn
;
4179 struct sk_buff
*skb
;
4182 BT_DBG("%s", hdev
->name
);
4184 if (!hci_conn_num(hdev
, ESCO_LINK
))
4187 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
4189 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
4190 BT_DBG("skb %p len %d", skb
, skb
->len
);
4191 hci_send_frame(hdev
, skb
);
4194 if (conn
->sent
== ~0)
4200 static void hci_sched_le(struct hci_dev
*hdev
)
4202 struct hci_chan
*chan
;
4203 struct sk_buff
*skb
;
4204 int quote
, cnt
, tmp
;
4206 BT_DBG("%s", hdev
->name
);
4208 if (!hci_conn_num(hdev
, LE_LINK
))
4211 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
4212 /* LE tx timeout must be longer than maximum
4213 * link supervision timeout (40.9 seconds) */
4214 if (!hdev
->le_cnt
&& hdev
->le_pkts
&&
4215 time_after(jiffies
, hdev
->le_last_tx
+ HZ
* 45))
4216 hci_link_tx_to(hdev
, LE_LINK
);
4219 cnt
= hdev
->le_pkts
? hdev
->le_cnt
: hdev
->acl_cnt
;
4221 while (cnt
&& (chan
= hci_chan_sent(hdev
, LE_LINK
, "e
))) {
4222 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
4223 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
4224 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
4225 skb
->len
, skb
->priority
);
4227 /* Stop if priority has changed */
4228 if (skb
->priority
< priority
)
4231 skb
= skb_dequeue(&chan
->data_q
);
4233 hci_send_frame(hdev
, skb
);
4234 hdev
->le_last_tx
= jiffies
;
4245 hdev
->acl_cnt
= cnt
;
4248 hci_prio_recalculate(hdev
, LE_LINK
);
4251 static void hci_tx_work(struct work_struct
*work
)
4253 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, tx_work
);
4254 struct sk_buff
*skb
;
4256 BT_DBG("%s acl %d sco %d le %d", hdev
->name
, hdev
->acl_cnt
,
4257 hdev
->sco_cnt
, hdev
->le_cnt
);
4259 if (!hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
4260 /* Schedule queues and send stuff to HCI driver */
4261 hci_sched_acl(hdev
);
4262 hci_sched_sco(hdev
);
4263 hci_sched_esco(hdev
);
4267 /* Send next queued raw (unknown type) packet */
4268 while ((skb
= skb_dequeue(&hdev
->raw_q
)))
4269 hci_send_frame(hdev
, skb
);
4272 /* ----- HCI RX task (incoming data processing) ----- */
4274 /* ACL data packet */
4275 static void hci_acldata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4277 struct hci_acl_hdr
*hdr
= (void *) skb
->data
;
4278 struct hci_conn
*conn
;
4279 __u16 handle
, flags
;
4281 skb_pull(skb
, HCI_ACL_HDR_SIZE
);
4283 handle
= __le16_to_cpu(hdr
->handle
);
4284 flags
= hci_flags(handle
);
4285 handle
= hci_handle(handle
);
4287 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev
->name
, skb
->len
,
4290 hdev
->stat
.acl_rx
++;
4293 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4294 hci_dev_unlock(hdev
);
4297 hci_conn_enter_active_mode(conn
, BT_POWER_FORCE_ACTIVE_OFF
);
4299 /* Send to upper protocol */
4300 l2cap_recv_acldata(conn
, skb
, flags
);
4303 BT_ERR("%s ACL packet for unknown connection handle %d",
4304 hdev
->name
, handle
);
4310 /* SCO data packet */
4311 static void hci_scodata_packet(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4313 struct hci_sco_hdr
*hdr
= (void *) skb
->data
;
4314 struct hci_conn
*conn
;
4317 skb_pull(skb
, HCI_SCO_HDR_SIZE
);
4319 handle
= __le16_to_cpu(hdr
->handle
);
4321 BT_DBG("%s len %d handle 0x%4.4x", hdev
->name
, skb
->len
, handle
);
4323 hdev
->stat
.sco_rx
++;
4326 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4327 hci_dev_unlock(hdev
);
4330 /* Send to upper protocol */
4331 sco_recv_scodata(conn
, skb
);
4334 BT_ERR("%s SCO packet for unknown connection handle %d",
4335 hdev
->name
, handle
);
4341 static bool hci_req_is_complete(struct hci_dev
*hdev
)
4343 struct sk_buff
*skb
;
4345 skb
= skb_peek(&hdev
->cmd_q
);
4349 return bt_cb(skb
)->req
.start
;
4352 static void hci_resend_last(struct hci_dev
*hdev
)
4354 struct hci_command_hdr
*sent
;
4355 struct sk_buff
*skb
;
4358 if (!hdev
->sent_cmd
)
4361 sent
= (void *) hdev
->sent_cmd
->data
;
4362 opcode
= __le16_to_cpu(sent
->opcode
);
4363 if (opcode
== HCI_OP_RESET
)
4366 skb
= skb_clone(hdev
->sent_cmd
, GFP_KERNEL
);
4370 skb_queue_head(&hdev
->cmd_q
, skb
);
4371 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
4374 void hci_req_cmd_complete(struct hci_dev
*hdev
, u16 opcode
, u8 status
,
4375 hci_req_complete_t
*req_complete
,
4376 hci_req_complete_skb_t
*req_complete_skb
)
4378 struct sk_buff
*skb
;
4379 unsigned long flags
;
4381 BT_DBG("opcode 0x%04x status 0x%02x", opcode
, status
);
4383 /* If the completed command doesn't match the last one that was
4384 * sent we need to do special handling of it.
4386 if (!hci_sent_cmd_data(hdev
, opcode
)) {
4387 /* Some CSR based controllers generate a spontaneous
4388 * reset complete event during init and any pending
4389 * command will never be completed. In such a case we
4390 * need to resend whatever was the last sent
4393 if (test_bit(HCI_INIT
, &hdev
->flags
) && opcode
== HCI_OP_RESET
)
4394 hci_resend_last(hdev
);
4399 /* If the command succeeded and there's still more commands in
4400 * this request the request is not yet complete.
4402 if (!status
&& !hci_req_is_complete(hdev
))
4405 /* If this was the last command in a request the complete
4406 * callback would be found in hdev->sent_cmd instead of the
4407 * command queue (hdev->cmd_q).
4409 if (bt_cb(hdev
->sent_cmd
)->req
.complete
) {
4410 *req_complete
= bt_cb(hdev
->sent_cmd
)->req
.complete
;
4414 if (bt_cb(hdev
->sent_cmd
)->req
.complete_skb
) {
4415 *req_complete_skb
= bt_cb(hdev
->sent_cmd
)->req
.complete_skb
;
4419 /* Remove all pending commands belonging to this request */
4420 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
4421 while ((skb
= __skb_dequeue(&hdev
->cmd_q
))) {
4422 if (bt_cb(skb
)->req
.start
) {
4423 __skb_queue_head(&hdev
->cmd_q
, skb
);
4427 *req_complete
= bt_cb(skb
)->req
.complete
;
4428 *req_complete_skb
= bt_cb(skb
)->req
.complete_skb
;
4431 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
4434 static void hci_rx_work(struct work_struct
*work
)
4436 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, rx_work
);
4437 struct sk_buff
*skb
;
4439 BT_DBG("%s", hdev
->name
);
4441 while ((skb
= skb_dequeue(&hdev
->rx_q
))) {
4442 /* Send copy to monitor */
4443 hci_send_to_monitor(hdev
, skb
);
4445 if (atomic_read(&hdev
->promisc
)) {
4446 /* Send copy to the sockets */
4447 hci_send_to_sock(hdev
, skb
);
4450 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
4455 if (test_bit(HCI_INIT
, &hdev
->flags
)) {
4456 /* Don't process data packets in this states. */
4457 switch (bt_cb(skb
)->pkt_type
) {
4458 case HCI_ACLDATA_PKT
:
4459 case HCI_SCODATA_PKT
:
4466 switch (bt_cb(skb
)->pkt_type
) {
4468 BT_DBG("%s Event packet", hdev
->name
);
4469 hci_event_packet(hdev
, skb
);
4472 case HCI_ACLDATA_PKT
:
4473 BT_DBG("%s ACL data packet", hdev
->name
);
4474 hci_acldata_packet(hdev
, skb
);
4477 case HCI_SCODATA_PKT
:
4478 BT_DBG("%s SCO data packet", hdev
->name
);
4479 hci_scodata_packet(hdev
, skb
);
4489 static void hci_cmd_work(struct work_struct
*work
)
4491 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, cmd_work
);
4492 struct sk_buff
*skb
;
4494 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev
->name
,
4495 atomic_read(&hdev
->cmd_cnt
), skb_queue_len(&hdev
->cmd_q
));
4497 /* Send queued commands */
4498 if (atomic_read(&hdev
->cmd_cnt
)) {
4499 skb
= skb_dequeue(&hdev
->cmd_q
);
4503 kfree_skb(hdev
->sent_cmd
);
4505 hdev
->sent_cmd
= skb_clone(skb
, GFP_KERNEL
);
4506 if (hdev
->sent_cmd
) {
4507 atomic_dec(&hdev
->cmd_cnt
);
4508 hci_send_frame(hdev
, skb
);
4509 if (test_bit(HCI_RESET
, &hdev
->flags
))
4510 cancel_delayed_work(&hdev
->cmd_timer
);
4512 schedule_delayed_work(&hdev
->cmd_timer
,
4515 skb_queue_head(&hdev
->cmd_q
, skb
);
4516 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);