/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
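/* Illustrative sketch (not part of the original file): a typical caller
 * builds a request, queues one or more commands and then runs it
 * asynchronously. The helper names and the use of HCI_OP_RESET here are
 * only examples.
 */
#if 0
static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name, opcode, status);
}

static int example_run(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	/* Returns -ENODATA if nothing was queued, or req->err on build errors */
	return hci_req_run(&req, example_complete);
}
#endif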
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
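/* Illustrative sketch (not part of the original file): sending a single
 * command synchronously and consuming the returned event skb. Callers are
 * expected to hold the request synchronization lock (hci_req_sync_lock).
 * HCI_OP_READ_LOCAL_VERSION and HCI_CMD_TIMEOUT are used only as examples.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb contains the Command Complete parameters */
	kfree_skb(skb);

	return 0;
}
#endif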
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
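/* Illustrative sketch (not part of the original file): hci_req_sync() takes
 * a request-builder callback of the same shape as the static helpers later
 * in this file (update_scan, bredr_inquiry, ...). The builder only queues
 * commands; the caller blocks until the request completes or times out. The
 * function names below are examples only.
 */
#if 0
static int example_build(struct hci_request *req, unsigned long opt)
{
	u8 scan = (u8)opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int example_sync(struct hci_dev *hdev)
{
	u8 hci_status;

	return hci_req_sync(hdev, example_build, SCAN_PAGE, HCI_CMD_TIMEOUT,
			    &hci_status);
}
#endif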
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
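/* Illustrative sketch (not part of the original file): several commands can
 * be queued on one request; only the first skb gets HCI_REQ_START, and the
 * completion callback passed to hci_req_run() fires after the last one. The
 * opcodes below are examples only.
 */
#if 0
static void example_queue_two(struct hci_request *req)
{
	u8 scan = SCAN_DISABLED;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}
#endif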
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
get_cur_adv_instance_scan_rsp_len(struct hci_dev
*hdev
)
841 u8 instance
= hdev
->cur_adv_instance
;
842 struct adv_info
*adv_instance
;
844 /* Ignore instance 0 */
845 if (instance
== 0x00)
848 adv_instance
= hci_find_adv_instance(hdev
, instance
);
852 /* TODO: Take into account the "appearance" and "local-name" flags here.
853 * These are currently being ignored as they are not supported.
855 return adv_instance
->scan_rsp_len
;
858 void __hci_req_disable_advertising(struct hci_request
*req
)
862 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
865 static u32
get_adv_instance_flags(struct hci_dev
*hdev
, u8 instance
)
868 struct adv_info
*adv_instance
;
870 if (instance
== 0x00) {
871 /* Instance 0 always manages the "Tx Power" and "Flags"
874 flags
= MGMT_ADV_FLAG_TX_POWER
| MGMT_ADV_FLAG_MANAGED_FLAGS
;
876 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
877 * corresponds to the "connectable" instance flag.
879 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
))
880 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
882 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
883 flags
|= MGMT_ADV_FLAG_LIMITED_DISCOV
;
884 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
885 flags
|= MGMT_ADV_FLAG_DISCOV
;
890 adv_instance
= hci_find_adv_instance(hdev
, instance
);
892 /* Return 0 when we got an invalid instance identifier. */
896 return adv_instance
->flags
;
899 static bool adv_use_rpa(struct hci_dev
*hdev
, uint32_t flags
)
901 /* If privacy is not enabled don't use RPA */
902 if (!hci_dev_test_flag(hdev
, HCI_PRIVACY
))
905 /* If basic privacy mode is enabled use RPA */
906 if (!hci_dev_test_flag(hdev
, HCI_LIMITED_PRIVACY
))
909 /* If limited privacy mode is enabled don't use RPA if we're
910 * both discoverable and bondable.
912 if ((flags
& MGMT_ADV_FLAG_DISCOV
) &&
913 hci_dev_test_flag(hdev
, HCI_BONDABLE
))
916 /* We're neither bondable nor discoverable in the limited
917 * privacy mode, therefore use RPA.
922 void __hci_req_enable_advertising(struct hci_request
*req
)
924 struct hci_dev
*hdev
= req
->hdev
;
925 struct hci_cp_le_set_adv_param cp
;
926 u8 own_addr_type
, enable
= 0x01;
930 if (hci_conn_num(hdev
, LE_LINK
) > 0)
933 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
934 __hci_req_disable_advertising(req
);
936 /* Clear the HCI_LE_ADV bit temporarily so that the
937 * hci_update_random_address knows that it's safe to go ahead
938 * and write a new random address. The flag will be set back on
939 * as soon as the SET_ADV_ENABLE HCI command completes.
941 hci_dev_clear_flag(hdev
, HCI_LE_ADV
);
943 flags
= get_adv_instance_flags(hdev
, hdev
->cur_adv_instance
);
945 /* If the "connectable" instance flag was not set, then choose between
946 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
948 connectable
= (flags
& MGMT_ADV_FLAG_CONNECTABLE
) ||
949 mgmt_get_connectable(hdev
);
951 /* Set require_privacy to true only when non-connectable
952 * advertising is used. In that case it is fine to use a
953 * non-resolvable private address.
955 if (hci_update_random_address(req
, !connectable
,
956 adv_use_rpa(hdev
, flags
),
960 memset(&cp
, 0, sizeof(cp
));
961 cp
.min_interval
= cpu_to_le16(hdev
->le_adv_min_interval
);
962 cp
.max_interval
= cpu_to_le16(hdev
->le_adv_max_interval
);
965 cp
.type
= LE_ADV_IND
;
966 else if (get_cur_adv_instance_scan_rsp_len(hdev
))
967 cp
.type
= LE_ADV_SCAN_IND
;
969 cp
.type
= LE_ADV_NONCONN_IND
;
971 cp
.own_address_type
= own_addr_type
;
972 cp
.channel_map
= hdev
->le_adv_channel_map
;
974 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
976 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}
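/* Illustrative sketch (not part of the original file): scheduling a
 * previously added advertising instance from within a request, as
 * hci_req_reenable_advertising() does above. The instance number is only an
 * example and must refer to an instance already registered with
 * hci_add_adv_instance().
 */
#if 0
static void example_schedule_instance(struct hci_request *req)
{
	__hci_req_schedule_adv_instance(req, 0x01, true);
}
#endif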
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
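/* Illustrative sketch (not part of the original file): removing all
 * advertising instances, as adv_timeout_expire() above and the mgmt code do.
 * With instance 0x00 and force == true every instance is dropped regardless
 * of its remaining lifetime; passing a request lets the next instance (if
 * any) be rescheduled in the same HCI transaction.
 */
#if 0
static void example_clear_all_instances(struct hci_dev *hdev,
					struct hci_request *req)
{
	hci_req_clear_adv_instance(hdev, NULL, req, 0x00, true);
}
#endif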
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
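/* Illustrative sketch (not part of the original file): callers such as
 * hci_req_add_le_passive_scan() above resolve the own address type before
 * queueing scan or advertising parameters. require_privacy is only set for
 * active scanning and non-connectable advertising. The helper name is an
 * example only.
 */
#if 0
static int example_own_addr(struct hci_request *req, u8 *own_addr_type)
{
	return hci_update_random_address(req, false,
					 scan_use_rpa(req->hdev),
					 own_addr_type);
}
#endif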
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			__hci_req_enable_advertising(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}
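/* Illustrative sketch (not part of the original file): hci_abort_conn() is
 * how other parts of the stack tear down a connection; the reason code is a
 * standard HCI error value.
 */
#if 0
static void example_abort(struct hci_conn *conn)
{
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
#endif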
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			__hci_req_update_adv_data(req, 0x00);
			__hci_req_update_scan_rsp_data(req, 0x00);

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
				__hci_req_enable_advertising(req);
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}