/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"
30 #define HCI_REQ_DONE 0
31 #define HCI_REQ_PEND 1
32 #define HCI_REQ_CANCELED 2
34 void hci_req_init(struct hci_request
*req
, struct hci_dev
*hdev
)
36 skb_queue_head_init(&req
->cmd_q
);
41 static int req_run(struct hci_request
*req
, hci_req_complete_t complete
,
42 hci_req_complete_skb_t complete_skb
)
44 struct hci_dev
*hdev
= req
->hdev
;
48 BT_DBG("length %u", skb_queue_len(&req
->cmd_q
));
50 /* If an error occurred during request building, remove all HCI
51 * commands queued on the HCI request queue.
54 skb_queue_purge(&req
->cmd_q
);
58 /* Do not allow empty requests */
59 if (skb_queue_empty(&req
->cmd_q
))
62 skb
= skb_peek_tail(&req
->cmd_q
);
64 bt_cb(skb
)->hci
.req_complete
= complete
;
65 } else if (complete_skb
) {
66 bt_cb(skb
)->hci
.req_complete_skb
= complete_skb
;
67 bt_cb(skb
)->hci
.req_flags
|= HCI_REQ_SKB
;
70 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
71 skb_queue_splice_tail(&req
->cmd_q
, &hdev
->cmd_q
);
72 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
74 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
79 int hci_req_run(struct hci_request
*req
, hci_req_complete_t complete
)
81 return req_run(req
, complete
, NULL
);
84 int hci_req_run_skb(struct hci_request
*req
, hci_req_complete_skb_t complete
)
86 return req_run(req
, NULL
, complete
);
89 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
, u16 opcode
,
92 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
94 if (hdev
->req_status
== HCI_REQ_PEND
) {
95 hdev
->req_result
= result
;
96 hdev
->req_status
= HCI_REQ_DONE
;
98 hdev
->req_skb
= skb_get(skb
);
99 wake_up_interruptible(&hdev
->req_wait_q
);
103 void hci_req_sync_cancel(struct hci_dev
*hdev
, int err
)
105 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
107 if (hdev
->req_status
== HCI_REQ_PEND
) {
108 hdev
->req_result
= err
;
109 hdev
->req_status
= HCI_REQ_CANCELED
;
110 wake_up_interruptible(&hdev
->req_wait_q
);
114 struct sk_buff
*__hci_cmd_sync_ev(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
115 const void *param
, u8 event
, u32 timeout
)
117 DECLARE_WAITQUEUE(wait
, current
);
118 struct hci_request req
;
122 BT_DBG("%s", hdev
->name
);
124 hci_req_init(&req
, hdev
);
126 hci_req_add_ev(&req
, opcode
, plen
, param
, event
);
128 hdev
->req_status
= HCI_REQ_PEND
;
130 add_wait_queue(&hdev
->req_wait_q
, &wait
);
131 set_current_state(TASK_INTERRUPTIBLE
);
133 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
135 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
136 set_current_state(TASK_RUNNING
);
140 schedule_timeout(timeout
);
142 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
144 if (signal_pending(current
))
145 return ERR_PTR(-EINTR
);
147 switch (hdev
->req_status
) {
149 err
= -bt_to_errno(hdev
->req_result
);
152 case HCI_REQ_CANCELED
:
153 err
= -hdev
->req_result
;
161 hdev
->req_status
= hdev
->req_result
= 0;
163 hdev
->req_skb
= NULL
;
165 BT_DBG("%s end: err %d", hdev
->name
, err
);
173 return ERR_PTR(-ENODATA
);
177 EXPORT_SYMBOL(__hci_cmd_sync_ev
);
179 struct sk_buff
*__hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
180 const void *param
, u32 timeout
)
182 return __hci_cmd_sync_ev(hdev
, opcode
, plen
, param
, 0, timeout
);
184 EXPORT_SYMBOL(__hci_cmd_sync
);
186 /* Execute request and wait for completion. */
187 int __hci_req_sync(struct hci_dev
*hdev
, void (*func
)(struct hci_request
*req
,
189 unsigned long opt
, __u32 timeout
)
191 struct hci_request req
;
192 DECLARE_WAITQUEUE(wait
, current
);
195 BT_DBG("%s start", hdev
->name
);
197 hci_req_init(&req
, hdev
);
199 hdev
->req_status
= HCI_REQ_PEND
;
203 add_wait_queue(&hdev
->req_wait_q
, &wait
);
204 set_current_state(TASK_INTERRUPTIBLE
);
206 err
= hci_req_run_skb(&req
, hci_req_sync_complete
);
208 hdev
->req_status
= 0;
210 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
211 set_current_state(TASK_RUNNING
);
213 /* ENODATA means the HCI request command queue is empty.
214 * This can happen when a request with conditionals doesn't
215 * trigger any commands to be sent. This is normal behavior
216 * and should not trigger an error return.
224 schedule_timeout(timeout
);
226 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
228 if (signal_pending(current
))
231 switch (hdev
->req_status
) {
233 err
= -bt_to_errno(hdev
->req_result
);
236 case HCI_REQ_CANCELED
:
237 err
= -hdev
->req_result
;
245 hdev
->req_status
= hdev
->req_result
= 0;
247 BT_DBG("%s end: err %d", hdev
->name
, err
);
252 int hci_req_sync(struct hci_dev
*hdev
, void (*req
)(struct hci_request
*req
,
254 unsigned long opt
, __u32 timeout
)
258 if (!test_bit(HCI_UP
, &hdev
->flags
))
261 /* Serialize all requests */
262 hci_req_sync_lock(hdev
);
263 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
);
264 hci_req_sync_unlock(hdev
);
269 struct sk_buff
*hci_prepare_cmd(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
272 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
273 struct hci_command_hdr
*hdr
;
276 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
280 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
281 hdr
->opcode
= cpu_to_le16(opcode
);
285 memcpy(skb_put(skb
, plen
), param
, plen
);
287 BT_DBG("skb len %d", skb
->len
);
289 hci_skb_pkt_type(skb
) = HCI_COMMAND_PKT
;
290 hci_skb_opcode(skb
) = opcode
;
295 /* Queue a command to an asynchronous HCI request */
296 void hci_req_add_ev(struct hci_request
*req
, u16 opcode
, u32 plen
,
297 const void *param
, u8 event
)
299 struct hci_dev
*hdev
= req
->hdev
;
302 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
304 /* If an error occurred during request building, there is no point in
305 * queueing the HCI command. We can simply return.
310 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
312 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
318 if (skb_queue_empty(&req
->cmd_q
))
319 bt_cb(skb
)->hci
.req_flags
|= HCI_REQ_START
;
321 bt_cb(skb
)->hci
.req_event
= event
;
323 skb_queue_tail(&req
->cmd_q
, skb
);
326 void hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
,
329 hci_req_add_ev(req
, opcode
, plen
, param
, 0);
332 void hci_req_add_le_scan_disable(struct hci_request
*req
)
334 struct hci_cp_le_set_scan_enable cp
;
336 memset(&cp
, 0, sizeof(cp
));
337 cp
.enable
= LE_SCAN_DISABLE
;
338 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
341 static void add_to_white_list(struct hci_request
*req
,
342 struct hci_conn_params
*params
)
344 struct hci_cp_le_add_to_white_list cp
;
346 cp
.bdaddr_type
= params
->addr_type
;
347 bacpy(&cp
.bdaddr
, ¶ms
->addr
);
349 hci_req_add(req
, HCI_OP_LE_ADD_TO_WHITE_LIST
, sizeof(cp
), &cp
);
352 static u8
update_white_list(struct hci_request
*req
)
354 struct hci_dev
*hdev
= req
->hdev
;
355 struct hci_conn_params
*params
;
356 struct bdaddr_list
*b
;
357 uint8_t white_list_entries
= 0;
359 /* Go through the current white list programmed into the
360 * controller one by one and check if that address is still
361 * in the list of pending connections or list of devices to
362 * report. If not present in either list, then queue the
363 * command to remove it from the controller.
365 list_for_each_entry(b
, &hdev
->le_white_list
, list
) {
366 struct hci_cp_le_del_from_white_list cp
;
368 if (hci_pend_le_action_lookup(&hdev
->pend_le_conns
,
369 &b
->bdaddr
, b
->bdaddr_type
) ||
370 hci_pend_le_action_lookup(&hdev
->pend_le_reports
,
371 &b
->bdaddr
, b
->bdaddr_type
)) {
372 white_list_entries
++;
376 cp
.bdaddr_type
= b
->bdaddr_type
;
377 bacpy(&cp
.bdaddr
, &b
->bdaddr
);
379 hci_req_add(req
, HCI_OP_LE_DEL_FROM_WHITE_LIST
,
383 /* Since all no longer valid white list entries have been
384 * removed, walk through the list of pending connections
385 * and ensure that any new device gets programmed into
388 * If the list of the devices is larger than the list of
389 * available white list entries in the controller, then
390 * just abort and return filer policy value to not use the
393 list_for_each_entry(params
, &hdev
->pend_le_conns
, action
) {
394 if (hci_bdaddr_list_lookup(&hdev
->le_white_list
,
395 ¶ms
->addr
, params
->addr_type
))
398 if (white_list_entries
>= hdev
->le_white_list_size
) {
399 /* Select filter policy to accept all advertising */
403 if (hci_find_irk_by_addr(hdev
, ¶ms
->addr
,
404 params
->addr_type
)) {
405 /* White list can not be used with RPAs */
409 white_list_entries
++;
410 add_to_white_list(req
, params
);
413 /* After adding all new pending connections, walk through
414 * the list of pending reports and also add these to the
415 * white list if there is still space.
417 list_for_each_entry(params
, &hdev
->pend_le_reports
, action
) {
418 if (hci_bdaddr_list_lookup(&hdev
->le_white_list
,
419 ¶ms
->addr
, params
->addr_type
))
422 if (white_list_entries
>= hdev
->le_white_list_size
) {
423 /* Select filter policy to accept all advertising */
427 if (hci_find_irk_by_addr(hdev
, ¶ms
->addr
,
428 params
->addr_type
)) {
429 /* White list can not be used with RPAs */
433 white_list_entries
++;
434 add_to_white_list(req
, params
);
437 /* Select filter policy to use white list */
441 void hci_req_add_le_passive_scan(struct hci_request
*req
)
443 struct hci_cp_le_set_scan_param param_cp
;
444 struct hci_cp_le_set_scan_enable enable_cp
;
445 struct hci_dev
*hdev
= req
->hdev
;
449 /* Set require_privacy to false since no SCAN_REQ are send
450 * during passive scanning. Not using an non-resolvable address
451 * here is important so that peer devices using direct
452 * advertising with our address will be correctly reported
455 if (hci_update_random_address(req
, false, &own_addr_type
))
458 /* Adding or removing entries from the white list must
459 * happen before enabling scanning. The controller does
460 * not allow white list modification while scanning.
462 filter_policy
= update_white_list(req
);
464 /* When the controller is using random resolvable addresses and
465 * with that having LE privacy enabled, then controllers with
466 * Extended Scanner Filter Policies support can now enable support
467 * for handling directed advertising.
469 * So instead of using filter polices 0x00 (no whitelist)
470 * and 0x01 (whitelist enabled) use the new filter policies
471 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
473 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
) &&
474 (hdev
->le_features
[0] & HCI_LE_EXT_SCAN_POLICY
))
475 filter_policy
|= 0x02;
477 memset(¶m_cp
, 0, sizeof(param_cp
));
478 param_cp
.type
= LE_SCAN_PASSIVE
;
479 param_cp
.interval
= cpu_to_le16(hdev
->le_scan_interval
);
480 param_cp
.window
= cpu_to_le16(hdev
->le_scan_window
);
481 param_cp
.own_address_type
= own_addr_type
;
482 param_cp
.filter_policy
= filter_policy
;
483 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
486 memset(&enable_cp
, 0, sizeof(enable_cp
));
487 enable_cp
.enable
= LE_SCAN_ENABLE
;
488 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
489 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
493 static void set_random_addr(struct hci_request
*req
, bdaddr_t
*rpa
)
495 struct hci_dev
*hdev
= req
->hdev
;
497 /* If we're advertising or initiating an LE connection we can't
498 * go ahead and change the random address at this time. This is
499 * because the eventual initiator address used for the
500 * subsequently created connection will be undefined (some
501 * controllers use the new address and others the one we had
502 * when the operation started).
504 * In this kind of scenario skip the update and let the random
505 * address be updated at the next cycle.
507 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
) ||
508 hci_lookup_le_connect(hdev
)) {
509 BT_DBG("Deferring random address update");
510 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
514 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6, rpa
);
517 int hci_update_random_address(struct hci_request
*req
, bool require_privacy
,
520 struct hci_dev
*hdev
= req
->hdev
;
523 /* If privacy is enabled use a resolvable private address. If
524 * current RPA has expired or there is something else than
525 * the current RPA in use, then generate a new one.
527 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
530 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
532 if (!hci_dev_test_and_clear_flag(hdev
, HCI_RPA_EXPIRED
) &&
533 !bacmp(&hdev
->random_addr
, &hdev
->rpa
))
536 err
= smp_generate_rpa(hdev
, hdev
->irk
, &hdev
->rpa
);
538 BT_ERR("%s failed to generate new RPA", hdev
->name
);
542 set_random_addr(req
, &hdev
->rpa
);
544 to
= msecs_to_jiffies(hdev
->rpa_timeout
* 1000);
545 queue_delayed_work(hdev
->workqueue
, &hdev
->rpa_expired
, to
);
550 /* In case of required privacy without resolvable private address,
551 * use an non-resolvable private address. This is useful for active
552 * scanning and non-connectable advertising.
554 if (require_privacy
) {
558 /* The non-resolvable private address is generated
559 * from random six bytes with the two most significant
562 get_random_bytes(&nrpa
, 6);
565 /* The non-resolvable private address shall not be
566 * equal to the public address.
568 if (bacmp(&hdev
->bdaddr
, &nrpa
))
572 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
573 set_random_addr(req
, &nrpa
);
577 /* If forcing static address is in use or there is no public
578 * address use the static address as random address (but skip
579 * the HCI command if the current random address is already the
582 * In case BR/EDR has been disabled on a dual-mode controller
583 * and a static address has been configured, then use that
584 * address instead of the public BR/EDR address.
586 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
587 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
588 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
589 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
590 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
591 if (bacmp(&hdev
->static_addr
, &hdev
->random_addr
))
592 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6,
597 /* Neither privacy nor static address is being used so use a
600 *own_addr_type
= ADDR_LE_DEV_PUBLIC
;
605 static bool disconnected_whitelist_entries(struct hci_dev
*hdev
)
607 struct bdaddr_list
*b
;
609 list_for_each_entry(b
, &hdev
->whitelist
, list
) {
610 struct hci_conn
*conn
;
612 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &b
->bdaddr
);
616 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
623 void __hci_update_page_scan(struct hci_request
*req
)
625 struct hci_dev
*hdev
= req
->hdev
;
628 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
631 if (!hdev_is_powered(hdev
))
634 if (mgmt_powering_down(hdev
))
637 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
) ||
638 disconnected_whitelist_entries(hdev
))
641 scan
= SCAN_DISABLED
;
643 if (test_bit(HCI_PSCAN
, &hdev
->flags
) == !!(scan
& SCAN_PAGE
))
646 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
647 scan
|= SCAN_INQUIRY
;
649 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
652 void hci_update_page_scan(struct hci_dev
*hdev
)
654 struct hci_request req
;
656 hci_req_init(&req
, hdev
);
657 __hci_update_page_scan(&req
);
658 hci_req_run(&req
, NULL
);
661 /* This function controls the background scanning based on hdev->pend_le_conns
662 * list. If there are pending LE connection we start the background scanning,
663 * otherwise we stop it.
665 * This function requires the caller holds hdev->lock.
667 void __hci_update_background_scan(struct hci_request
*req
)
669 struct hci_dev
*hdev
= req
->hdev
;
671 if (!test_bit(HCI_UP
, &hdev
->flags
) ||
672 test_bit(HCI_INIT
, &hdev
->flags
) ||
673 hci_dev_test_flag(hdev
, HCI_SETUP
) ||
674 hci_dev_test_flag(hdev
, HCI_CONFIG
) ||
675 hci_dev_test_flag(hdev
, HCI_AUTO_OFF
) ||
676 hci_dev_test_flag(hdev
, HCI_UNREGISTER
))
679 /* No point in doing scanning if LE support hasn't been enabled */
680 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
683 /* If discovery is active don't interfere with it */
684 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
)
687 /* Reset RSSI and UUID filters when starting background scanning
688 * since these filters are meant for service discovery only.
690 * The Start Discovery and Start Service Discovery operations
691 * ensure to set proper values for RSSI threshold and UUID
692 * filter list. So it is safe to just reset them here.
694 hci_discovery_filter_clear(hdev
);
696 if (list_empty(&hdev
->pend_le_conns
) &&
697 list_empty(&hdev
->pend_le_reports
)) {
698 /* If there is no pending LE connections or devices
699 * to be scanned for, we should stop the background
703 /* If controller is not scanning we are done. */
704 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
707 hci_req_add_le_scan_disable(req
);
709 BT_DBG("%s stopping background scanning", hdev
->name
);
711 /* If there is at least one pending LE connection, we should
712 * keep the background scan running.
715 /* If controller is connecting, we should not start scanning
716 * since some controllers are not able to scan and connect at
719 if (hci_lookup_le_connect(hdev
))
722 /* If controller is currently scanning, we stop it to ensure we
723 * don't miss any advertising (due to duplicates filter).
725 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
726 hci_req_add_le_scan_disable(req
);
728 hci_req_add_le_passive_scan(req
);
730 BT_DBG("%s starting background scanning", hdev
->name
);
734 void __hci_abort_conn(struct hci_request
*req
, struct hci_conn
*conn
,
737 switch (conn
->state
) {
740 if (conn
->type
== AMP_LINK
) {
741 struct hci_cp_disconn_phy_link cp
;
743 cp
.phy_handle
= HCI_PHY_HANDLE(conn
->handle
);
745 hci_req_add(req
, HCI_OP_DISCONN_PHY_LINK
, sizeof(cp
),
748 struct hci_cp_disconnect dc
;
750 dc
.handle
= cpu_to_le16(conn
->handle
);
752 hci_req_add(req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
755 conn
->state
= BT_DISCONN
;
759 if (conn
->type
== LE_LINK
) {
760 if (test_bit(HCI_CONN_SCANNING
, &conn
->flags
))
762 hci_req_add(req
, HCI_OP_LE_CREATE_CONN_CANCEL
,
764 } else if (conn
->type
== ACL_LINK
) {
765 if (req
->hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
767 hci_req_add(req
, HCI_OP_CREATE_CONN_CANCEL
,
772 if (conn
->type
== ACL_LINK
) {
773 struct hci_cp_reject_conn_req rej
;
775 bacpy(&rej
.bdaddr
, &conn
->dst
);
778 hci_req_add(req
, HCI_OP_REJECT_CONN_REQ
,
780 } else if (conn
->type
== SCO_LINK
|| conn
->type
== ESCO_LINK
) {
781 struct hci_cp_reject_sync_conn_req rej
;
783 bacpy(&rej
.bdaddr
, &conn
->dst
);
785 /* SCO rejection has its own limited set of
786 * allowed error values (0x0D-0x0F) which isn't
787 * compatible with most values passed to this
788 * function. To be safe hard-code one of the
789 * values that's suitable for SCO.
791 rej
.reason
= HCI_ERROR_REMOTE_LOW_RESOURCES
;
793 hci_req_add(req
, HCI_OP_REJECT_SYNC_CONN_REQ
,
798 conn
->state
= BT_CLOSED
;
803 static void abort_conn_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
806 BT_DBG("Failed to abort connection: status 0x%2.2x", status
);
809 int hci_abort_conn(struct hci_conn
*conn
, u8 reason
)
811 struct hci_request req
;
814 hci_req_init(&req
, conn
->hdev
);
816 __hci_abort_conn(&req
, conn
, reason
);
818 err
= hci_req_run(&req
, abort_conn_complete
);
819 if (err
&& err
!= -ENODATA
) {
820 BT_ERR("Failed to run HCI request: err %d", err
);
827 static void update_bg_scan(struct hci_request
*req
, unsigned long opt
)
829 hci_dev_lock(req
->hdev
);
830 __hci_update_background_scan(req
);
831 hci_dev_unlock(req
->hdev
);
834 static void bg_scan_update(struct work_struct
*work
)
836 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
839 hci_req_sync(hdev
, update_bg_scan
, 0, HCI_CMD_TIMEOUT
);
842 void hci_request_setup(struct hci_dev
*hdev
)
844 INIT_WORK(&hdev
->bg_scan_update
, bg_scan_update
);
847 void hci_request_cancel_all(struct hci_dev
*hdev
)
849 cancel_work_sync(&hdev
->bg_scan_update
);