/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012  Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
/* Table of mgmt opcodes this kernel supports; reported verbatim by
 * read_commands() (MGMT_OP_READ_COMMANDS).
 * NOTE(review): this extraction appears truncated — the embedded numbering
 * skips entries (e.g. between 41 and 44, 46 and 48) where upstream lists
 * additional opcodes such as MGMT_OP_READ_VERSION and MGMT_OP_SET_POWERED,
 * and the closing "};" is missing. Verify against the original file before
 * relying on this list.
 */
40 static const u16 mgmt_commands
[] = {
41 MGMT_OP_READ_INDEX_LIST
,
44 MGMT_OP_SET_DISCOVERABLE
,
45 MGMT_OP_SET_CONNECTABLE
,
46 MGMT_OP_SET_FAST_CONNECTABLE
,
48 MGMT_OP_SET_LINK_SECURITY
,
52 MGMT_OP_SET_DEV_CLASS
,
53 MGMT_OP_SET_LOCAL_NAME
,
56 MGMT_OP_LOAD_LINK_KEYS
,
57 MGMT_OP_LOAD_LONG_TERM_KEYS
,
59 MGMT_OP_GET_CONNECTIONS
,
60 MGMT_OP_PIN_CODE_REPLY
,
61 MGMT_OP_PIN_CODE_NEG_REPLY
,
62 MGMT_OP_SET_IO_CAPABILITY
,
64 MGMT_OP_CANCEL_PAIR_DEVICE
,
65 MGMT_OP_UNPAIR_DEVICE
,
66 MGMT_OP_USER_CONFIRM_REPLY
,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
68 MGMT_OP_USER_PASSKEY_REPLY
,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
70 MGMT_OP_READ_LOCAL_OOB_DATA
,
71 MGMT_OP_ADD_REMOTE_OOB_DATA
,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
73 MGMT_OP_START_DISCOVERY
,
74 MGMT_OP_STOP_DISCOVERY
,
77 MGMT_OP_UNBLOCK_DEVICE
,
78 MGMT_OP_SET_DEVICE_ID
,
79 MGMT_OP_SET_ADVERTISING
,
81 MGMT_OP_SET_STATIC_ADDRESS
,
82 MGMT_OP_SET_SCAN_PARAMS
,
83 MGMT_OP_SET_SECURE_CONN
,
84 MGMT_OP_SET_DEBUG_KEYS
,
87 MGMT_OP_GET_CONN_INFO
,
88 MGMT_OP_GET_CLOCK_INFO
,
90 MGMT_OP_REMOVE_DEVICE
,
/* Table of mgmt events this kernel can emit; reported by read_commands()
 * alongside mgmt_commands.
 * NOTE(review): extraction appears truncated — numbering gaps (e.g. 94→96,
 * 107→109, 114→117) suggest events such as MGMT_EV_INDEX_ADDED and
 * MGMT_EV_NEW_SETTINGS are missing from this view, as is the closing "};".
 * Verify against the original file.
 */
93 static const u16 mgmt_events
[] = {
94 MGMT_EV_CONTROLLER_ERROR
,
96 MGMT_EV_INDEX_REMOVED
,
98 MGMT_EV_CLASS_OF_DEV_CHANGED
,
99 MGMT_EV_LOCAL_NAME_CHANGED
,
100 MGMT_EV_NEW_LINK_KEY
,
101 MGMT_EV_NEW_LONG_TERM_KEY
,
102 MGMT_EV_DEVICE_CONNECTED
,
103 MGMT_EV_DEVICE_DISCONNECTED
,
104 MGMT_EV_CONNECT_FAILED
,
105 MGMT_EV_PIN_CODE_REQUEST
,
106 MGMT_EV_USER_CONFIRM_REQUEST
,
107 MGMT_EV_USER_PASSKEY_REQUEST
,
109 MGMT_EV_DEVICE_FOUND
,
111 MGMT_EV_DEVICE_BLOCKED
,
112 MGMT_EV_DEVICE_UNBLOCKED
,
113 MGMT_EV_DEVICE_UNPAIRED
,
114 MGMT_EV_PASSKEY_NOTIFY
,
117 MGMT_EV_DEVICE_ADDED
,
118 MGMT_EV_DEVICE_REMOVED
,
119 MGMT_EV_NEW_CONN_PARAM
,
122 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* A controller counts as powered only when the transport is up (HCI_UP)
 * and it is not merely in the auto-power-on grace period (HCI_AUTO_OFF).
 * The macro argument is parenthesized so expressions like
 * hdev_is_powered(a ? b : c) expand correctly.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &(hdev)->flags) && \
			       !test_bit(HCI_AUTO_OFF, &(hdev)->dev_flags))
128 struct list_head list
;
136 /* HCI to MGMT error code conversion table */
137 static u8 mgmt_status_table
[] = {
139 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
140 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
141 MGMT_STATUS_FAILED
, /* Hardware Failure */
142 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
143 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
144 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
145 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
146 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
147 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
148 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
149 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
150 MGMT_STATUS_BUSY
, /* Command Disallowed */
151 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
152 MGMT_STATUS_REJECTED
, /* Rejected Security */
153 MGMT_STATUS_REJECTED
, /* Rejected Personal */
154 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
155 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
156 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
157 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
158 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
159 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
160 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
161 MGMT_STATUS_BUSY
, /* Repeated Attempts */
162 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
163 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
164 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
165 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
166 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
167 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
168 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
169 MGMT_STATUS_FAILED
, /* Unspecified Error */
170 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
171 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
172 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
173 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
174 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
175 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
176 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
177 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
178 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
179 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
180 MGMT_STATUS_FAILED
, /* Transaction Collision */
181 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
182 MGMT_STATUS_REJECTED
, /* QoS Rejected */
183 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
184 MGMT_STATUS_REJECTED
, /* Insufficient Security */
185 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
186 MGMT_STATUS_BUSY
, /* Role Switch Pending */
187 MGMT_STATUS_FAILED
, /* Slot Violation */
188 MGMT_STATUS_FAILED
, /* Role Switch Failed */
189 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
190 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
191 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
192 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
193 MGMT_STATUS_BUSY
, /* Controller Busy */
194 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
195 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
196 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
197 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
198 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
201 static u8
mgmt_status(u8 hci_status
)
203 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
204 return mgmt_status_table
[hci_status
];
206 return MGMT_STATUS_FAILED
;
209 static int cmd_status(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
)
212 struct mgmt_hdr
*hdr
;
213 struct mgmt_ev_cmd_status
*ev
;
216 BT_DBG("sock %p, index %u, cmd %u, status %u", sk
, index
, cmd
, status
);
218 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
), GFP_KERNEL
);
222 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
224 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_STATUS
);
225 hdr
->index
= cpu_to_le16(index
);
226 hdr
->len
= cpu_to_le16(sizeof(*ev
));
228 ev
= (void *) skb_put(skb
, sizeof(*ev
));
230 ev
->opcode
= cpu_to_le16(cmd
);
232 err
= sock_queue_rcv_skb(sk
, skb
);
239 static int cmd_complete(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
,
240 void *rp
, size_t rp_len
)
243 struct mgmt_hdr
*hdr
;
244 struct mgmt_ev_cmd_complete
*ev
;
247 BT_DBG("sock %p", sk
);
249 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
) + rp_len
, GFP_KERNEL
);
253 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
255 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_COMPLETE
);
256 hdr
->index
= cpu_to_le16(index
);
257 hdr
->len
= cpu_to_le16(sizeof(*ev
) + rp_len
);
259 ev
= (void *) skb_put(skb
, sizeof(*ev
) + rp_len
);
260 ev
->opcode
= cpu_to_le16(cmd
);
264 memcpy(ev
->data
, rp
, rp_len
);
266 err
= sock_queue_rcv_skb(sk
, skb
);
273 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
276 struct mgmt_rp_read_version rp
;
278 BT_DBG("sock %p", sk
);
280 rp
.version
= MGMT_VERSION
;
281 rp
.revision
= cpu_to_le16(MGMT_REVISION
);
283 return cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0, &rp
,
287 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
290 struct mgmt_rp_read_commands
*rp
;
291 const u16 num_commands
= ARRAY_SIZE(mgmt_commands
);
292 const u16 num_events
= ARRAY_SIZE(mgmt_events
);
297 BT_DBG("sock %p", sk
);
299 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
301 rp
= kmalloc(rp_size
, GFP_KERNEL
);
305 rp
->num_commands
= cpu_to_le16(num_commands
);
306 rp
->num_events
= cpu_to_le16(num_events
);
308 for (i
= 0, opcode
= rp
->opcodes
; i
< num_commands
; i
++, opcode
++)
309 put_unaligned_le16(mgmt_commands
[i
], opcode
);
311 for (i
= 0; i
< num_events
; i
++, opcode
++)
312 put_unaligned_le16(mgmt_events
[i
], opcode
);
314 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0, rp
,
321 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
324 struct mgmt_rp_read_index_list
*rp
;
330 BT_DBG("sock %p", sk
);
332 read_lock(&hci_dev_list_lock
);
335 list_for_each_entry(d
, &hci_dev_list
, list
) {
336 if (d
->dev_type
== HCI_BREDR
)
340 rp_len
= sizeof(*rp
) + (2 * count
);
341 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
343 read_unlock(&hci_dev_list_lock
);
348 list_for_each_entry(d
, &hci_dev_list
, list
) {
349 if (test_bit(HCI_SETUP
, &d
->dev_flags
))
352 if (test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
355 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
358 if (d
->dev_type
== HCI_BREDR
) {
359 rp
->index
[count
++] = cpu_to_le16(d
->id
);
360 BT_DBG("Added hci%u", d
->id
);
364 rp
->num_controllers
= cpu_to_le16(count
);
365 rp_len
= sizeof(*rp
) + (2 * count
);
367 read_unlock(&hci_dev_list_lock
);
369 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
, 0, rp
,
377 static u32
get_supported_settings(struct hci_dev
*hdev
)
381 settings
|= MGMT_SETTING_POWERED
;
382 settings
|= MGMT_SETTING_PAIRABLE
;
383 settings
|= MGMT_SETTING_DEBUG_KEYS
;
385 if (lmp_bredr_capable(hdev
)) {
386 settings
|= MGMT_SETTING_CONNECTABLE
;
387 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
388 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
389 settings
|= MGMT_SETTING_DISCOVERABLE
;
390 settings
|= MGMT_SETTING_BREDR
;
391 settings
|= MGMT_SETTING_LINK_SECURITY
;
393 if (lmp_ssp_capable(hdev
)) {
394 settings
|= MGMT_SETTING_SSP
;
395 settings
|= MGMT_SETTING_HS
;
398 if (lmp_sc_capable(hdev
) ||
399 test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
400 settings
|= MGMT_SETTING_SECURE_CONN
;
403 if (lmp_le_capable(hdev
)) {
404 settings
|= MGMT_SETTING_LE
;
405 settings
|= MGMT_SETTING_ADVERTISING
;
406 settings
|= MGMT_SETTING_PRIVACY
;
412 static u32
get_current_settings(struct hci_dev
*hdev
)
416 if (hdev_is_powered(hdev
))
417 settings
|= MGMT_SETTING_POWERED
;
419 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
420 settings
|= MGMT_SETTING_CONNECTABLE
;
422 if (test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
423 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
425 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
426 settings
|= MGMT_SETTING_DISCOVERABLE
;
428 if (test_bit(HCI_PAIRABLE
, &hdev
->dev_flags
))
429 settings
|= MGMT_SETTING_PAIRABLE
;
431 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
432 settings
|= MGMT_SETTING_BREDR
;
434 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
435 settings
|= MGMT_SETTING_LE
;
437 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
))
438 settings
|= MGMT_SETTING_LINK_SECURITY
;
440 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
441 settings
|= MGMT_SETTING_SSP
;
443 if (test_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
))
444 settings
|= MGMT_SETTING_HS
;
446 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
447 settings
|= MGMT_SETTING_ADVERTISING
;
449 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
450 settings
|= MGMT_SETTING_SECURE_CONN
;
452 if (test_bit(HCI_KEEP_DEBUG_KEYS
, &hdev
->dev_flags
))
453 settings
|= MGMT_SETTING_DEBUG_KEYS
;
455 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
))
456 settings
|= MGMT_SETTING_PRIVACY
;
461 #define PNP_INFO_SVCLASS_ID 0x1200
463 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
465 u8
*ptr
= data
, *uuids_start
= NULL
;
466 struct bt_uuid
*uuid
;
471 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
474 if (uuid
->size
!= 16)
477 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
481 if (uuid16
== PNP_INFO_SVCLASS_ID
)
487 uuids_start
[1] = EIR_UUID16_ALL
;
491 /* Stop if not enough space to put next UUID */
492 if ((ptr
- data
) + sizeof(u16
) > len
) {
493 uuids_start
[1] = EIR_UUID16_SOME
;
497 *ptr
++ = (uuid16
& 0x00ff);
498 *ptr
++ = (uuid16
& 0xff00) >> 8;
499 uuids_start
[0] += sizeof(uuid16
);
505 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
507 u8
*ptr
= data
, *uuids_start
= NULL
;
508 struct bt_uuid
*uuid
;
513 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
514 if (uuid
->size
!= 32)
520 uuids_start
[1] = EIR_UUID32_ALL
;
524 /* Stop if not enough space to put next UUID */
525 if ((ptr
- data
) + sizeof(u32
) > len
) {
526 uuids_start
[1] = EIR_UUID32_SOME
;
530 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
532 uuids_start
[0] += sizeof(u32
);
538 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
540 u8
*ptr
= data
, *uuids_start
= NULL
;
541 struct bt_uuid
*uuid
;
546 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
547 if (uuid
->size
!= 128)
553 uuids_start
[1] = EIR_UUID128_ALL
;
557 /* Stop if not enough space to put next UUID */
558 if ((ptr
- data
) + 16 > len
) {
559 uuids_start
[1] = EIR_UUID128_SOME
;
563 memcpy(ptr
, uuid
->uuid
, 16);
565 uuids_start
[0] += 16;
571 static struct pending_cmd
*mgmt_pending_find(u16 opcode
, struct hci_dev
*hdev
)
573 struct pending_cmd
*cmd
;
575 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
576 if (cmd
->opcode
== opcode
)
583 static struct pending_cmd
*mgmt_pending_find_data(u16 opcode
,
584 struct hci_dev
*hdev
,
587 struct pending_cmd
*cmd
;
589 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
590 if (cmd
->user_data
!= data
)
592 if (cmd
->opcode
== opcode
)
599 static u8
create_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
604 name_len
= strlen(hdev
->dev_name
);
606 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
608 if (name_len
> max_len
) {
610 ptr
[1] = EIR_NAME_SHORT
;
612 ptr
[1] = EIR_NAME_COMPLETE
;
614 ptr
[0] = name_len
+ 1;
616 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
618 ad_len
+= (name_len
+ 2);
619 ptr
+= (name_len
+ 2);
625 static void update_scan_rsp_data(struct hci_request
*req
)
627 struct hci_dev
*hdev
= req
->hdev
;
628 struct hci_cp_le_set_scan_rsp_data cp
;
631 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
634 memset(&cp
, 0, sizeof(cp
));
636 len
= create_scan_rsp_data(hdev
, cp
.data
);
638 if (hdev
->scan_rsp_data_len
== len
&&
639 memcmp(cp
.data
, hdev
->scan_rsp_data
, len
) == 0)
642 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
643 hdev
->scan_rsp_data_len
= len
;
647 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
650 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
652 struct pending_cmd
*cmd
;
654 /* If there's a pending mgmt command the flags will not yet have
655 * their final values, so check for this first.
657 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
659 struct mgmt_mode
*cp
= cmd
->param
;
661 return LE_AD_GENERAL
;
662 else if (cp
->val
== 0x02)
663 return LE_AD_LIMITED
;
665 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
666 return LE_AD_LIMITED
;
667 else if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
668 return LE_AD_GENERAL
;
674 static u8
create_adv_data(struct hci_dev
*hdev
, u8
*ptr
)
676 u8 ad_len
= 0, flags
= 0;
678 flags
|= get_adv_discov_flags(hdev
);
680 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
681 flags
|= LE_AD_NO_BREDR
;
684 BT_DBG("adv flags 0x%02x", flags
);
694 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
696 ptr
[1] = EIR_TX_POWER
;
697 ptr
[2] = (u8
) hdev
->adv_tx_power
;
706 static void update_adv_data(struct hci_request
*req
)
708 struct hci_dev
*hdev
= req
->hdev
;
709 struct hci_cp_le_set_adv_data cp
;
712 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
715 memset(&cp
, 0, sizeof(cp
));
717 len
= create_adv_data(hdev
, cp
.data
);
719 if (hdev
->adv_data_len
== len
&&
720 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
723 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
724 hdev
->adv_data_len
= len
;
728 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
731 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
736 name_len
= strlen(hdev
->dev_name
);
742 ptr
[1] = EIR_NAME_SHORT
;
744 ptr
[1] = EIR_NAME_COMPLETE
;
746 /* EIR Data length */
747 ptr
[0] = name_len
+ 1;
749 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
751 ptr
+= (name_len
+ 2);
754 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
756 ptr
[1] = EIR_TX_POWER
;
757 ptr
[2] = (u8
) hdev
->inq_tx_power
;
762 if (hdev
->devid_source
> 0) {
764 ptr
[1] = EIR_DEVICE_ID
;
766 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
767 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
768 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
769 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
774 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
775 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
776 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
779 static void update_eir(struct hci_request
*req
)
781 struct hci_dev
*hdev
= req
->hdev
;
782 struct hci_cp_write_eir cp
;
784 if (!hdev_is_powered(hdev
))
787 if (!lmp_ext_inq_capable(hdev
))
790 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
793 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
796 memset(&cp
, 0, sizeof(cp
));
798 create_eir(hdev
, cp
.data
);
800 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
803 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
805 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
808 static u8
get_service_classes(struct hci_dev
*hdev
)
810 struct bt_uuid
*uuid
;
813 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
814 val
|= uuid
->svc_hint
;
819 static void update_class(struct hci_request
*req
)
821 struct hci_dev
*hdev
= req
->hdev
;
824 BT_DBG("%s", hdev
->name
);
826 if (!hdev_is_powered(hdev
))
829 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
832 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
835 cod
[0] = hdev
->minor_class
;
836 cod
[1] = hdev
->major_class
;
837 cod
[2] = get_service_classes(hdev
);
839 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
842 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
845 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
848 static bool get_connectable(struct hci_dev
*hdev
)
850 struct pending_cmd
*cmd
;
852 /* If there's a pending mgmt command the flag will not yet have
853 * it's final value, so check for this first.
855 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
857 struct mgmt_mode
*cp
= cmd
->param
;
861 return test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
864 static void enable_advertising(struct hci_request
*req
)
866 struct hci_dev
*hdev
= req
->hdev
;
867 struct hci_cp_le_set_adv_param cp
;
868 u8 own_addr_type
, enable
= 0x01;
871 /* Clear the HCI_ADVERTISING bit temporarily so that the
872 * hci_update_random_address knows that it's safe to go ahead
873 * and write a new random address. The flag will be set back on
874 * as soon as the SET_ADV_ENABLE HCI command completes.
876 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
878 connectable
= get_connectable(hdev
);
880 /* Set require_privacy to true only when non-connectable
881 * advertising is used. In that case it is fine to use a
882 * non-resolvable private address.
884 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
887 memset(&cp
, 0, sizeof(cp
));
888 cp
.min_interval
= cpu_to_le16(0x0800);
889 cp
.max_interval
= cpu_to_le16(0x0800);
890 cp
.type
= connectable
? LE_ADV_IND
: LE_ADV_NONCONN_IND
;
891 cp
.own_address_type
= own_addr_type
;
892 cp
.channel_map
= hdev
->le_adv_channel_map
;
894 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
896 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
899 static void disable_advertising(struct hci_request
*req
)
903 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
906 static void service_cache_off(struct work_struct
*work
)
908 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
910 struct hci_request req
;
912 if (!test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
915 hci_req_init(&req
, hdev
);
922 hci_dev_unlock(hdev
);
924 hci_req_run(&req
, NULL
);
927 static void rpa_expired(struct work_struct
*work
)
929 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
931 struct hci_request req
;
935 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
937 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) ||
938 hci_conn_num(hdev
, LE_LINK
) > 0)
941 /* The generation of a new RPA and programming it into the
942 * controller happens in the enable_advertising() function.
945 hci_req_init(&req
, hdev
);
947 disable_advertising(&req
);
948 enable_advertising(&req
);
950 hci_req_run(&req
, NULL
);
953 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
955 if (test_and_set_bit(HCI_MGMT
, &hdev
->dev_flags
))
958 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
959 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
961 /* Non-mgmt controlled devices get this bit set
962 * implicitly so that pairing works for them, however
963 * for mgmt we require user-space to explicitly enable
966 clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
969 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
970 void *data
, u16 data_len
)
972 struct mgmt_rp_read_info rp
;
974 BT_DBG("sock %p %s", sk
, hdev
->name
);
978 memset(&rp
, 0, sizeof(rp
));
980 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
982 rp
.version
= hdev
->hci_ver
;
983 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
985 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
986 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
988 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
990 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
991 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
993 hci_dev_unlock(hdev
);
995 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
999 static void mgmt_pending_free(struct pending_cmd
*cmd
)
1006 static struct pending_cmd
*mgmt_pending_add(struct sock
*sk
, u16 opcode
,
1007 struct hci_dev
*hdev
, void *data
,
1010 struct pending_cmd
*cmd
;
1012 cmd
= kzalloc(sizeof(*cmd
), GFP_KERNEL
);
1016 cmd
->opcode
= opcode
;
1017 cmd
->index
= hdev
->id
;
1019 cmd
->param
= kmalloc(len
, GFP_KERNEL
);
1026 memcpy(cmd
->param
, data
, len
);
1031 list_add(&cmd
->list
, &hdev
->mgmt_pending
);
1036 static void mgmt_pending_foreach(u16 opcode
, struct hci_dev
*hdev
,
1037 void (*cb
)(struct pending_cmd
*cmd
,
1041 struct pending_cmd
*cmd
, *tmp
;
1043 list_for_each_entry_safe(cmd
, tmp
, &hdev
->mgmt_pending
, list
) {
1044 if (opcode
> 0 && cmd
->opcode
!= opcode
)
1051 static void mgmt_pending_remove(struct pending_cmd
*cmd
)
1053 list_del(&cmd
->list
);
1054 mgmt_pending_free(cmd
);
1057 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1059 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1061 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1065 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
)
1067 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1069 if (hci_conn_count(hdev
) == 0) {
1070 cancel_delayed_work(&hdev
->power_off
);
1071 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1075 static void hci_stop_discovery(struct hci_request
*req
)
1077 struct hci_dev
*hdev
= req
->hdev
;
1078 struct hci_cp_remote_name_req_cancel cp
;
1079 struct inquiry_entry
*e
;
1081 switch (hdev
->discovery
.state
) {
1082 case DISCOVERY_FINDING
:
1083 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
1084 hci_req_add(req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
1086 cancel_delayed_work(&hdev
->le_scan_disable
);
1087 hci_req_add_le_scan_disable(req
);
1092 case DISCOVERY_RESOLVING
:
1093 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
1098 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
1099 hci_req_add(req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
1105 /* Passive scanning */
1106 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1107 hci_req_add_le_scan_disable(req
);
1112 static int clean_up_hci_state(struct hci_dev
*hdev
)
1114 struct hci_request req
;
1115 struct hci_conn
*conn
;
1117 hci_req_init(&req
, hdev
);
1119 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1120 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1122 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1125 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1126 disable_advertising(&req
);
1128 hci_stop_discovery(&req
);
1130 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1131 struct hci_cp_disconnect dc
;
1132 struct hci_cp_reject_conn_req rej
;
1134 switch (conn
->state
) {
1137 dc
.handle
= cpu_to_le16(conn
->handle
);
1138 dc
.reason
= 0x15; /* Terminated due to Power Off */
1139 hci_req_add(&req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
1142 if (conn
->type
== LE_LINK
)
1143 hci_req_add(&req
, HCI_OP_LE_CREATE_CONN_CANCEL
,
1145 else if (conn
->type
== ACL_LINK
)
1146 hci_req_add(&req
, HCI_OP_CREATE_CONN_CANCEL
,
1150 bacpy(&rej
.bdaddr
, &conn
->dst
);
1151 rej
.reason
= 0x15; /* Terminated due to Power Off */
1152 if (conn
->type
== ACL_LINK
)
1153 hci_req_add(&req
, HCI_OP_REJECT_CONN_REQ
,
1155 else if (conn
->type
== SCO_LINK
)
1156 hci_req_add(&req
, HCI_OP_REJECT_SYNC_CONN_REQ
,
1162 return hci_req_run(&req
, clean_up_hci_complete
);
1165 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1168 struct mgmt_mode
*cp
= data
;
1169 struct pending_cmd
*cmd
;
1172 BT_DBG("request for %s", hdev
->name
);
1174 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1175 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1176 MGMT_STATUS_INVALID_PARAMS
);
1180 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1181 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1186 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1187 cancel_delayed_work(&hdev
->power_off
);
1190 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1192 err
= mgmt_powered(hdev
, 1);
1197 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1198 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1202 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1209 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1212 /* Disconnect connections, stop scans, etc */
1213 err
= clean_up_hci_state(hdev
);
1215 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1216 HCI_POWER_OFF_TIMEOUT
);
1218 /* ENODATA means there were no HCI commands queued */
1219 if (err
== -ENODATA
) {
1220 cancel_delayed_work(&hdev
->power_off
);
1221 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1227 hci_dev_unlock(hdev
);
1231 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 data_len
,
1232 struct sock
*skip_sk
)
1234 struct sk_buff
*skb
;
1235 struct mgmt_hdr
*hdr
;
1237 skb
= alloc_skb(sizeof(*hdr
) + data_len
, GFP_KERNEL
);
1241 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
1242 hdr
->opcode
= cpu_to_le16(event
);
1244 hdr
->index
= cpu_to_le16(hdev
->id
);
1246 hdr
->index
= cpu_to_le16(MGMT_INDEX_NONE
);
1247 hdr
->len
= cpu_to_le16(data_len
);
1250 memcpy(skb_put(skb
, data_len
), data
, data_len
);
1253 __net_timestamp(skb
);
1255 hci_send_to_control(skb
, skip_sk
);
1261 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1265 ev
= cpu_to_le32(get_current_settings(hdev
));
1267 return mgmt_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
, sizeof(ev
), skip
);
1272 struct hci_dev
*hdev
;
1276 static void settings_rsp(struct pending_cmd
*cmd
, void *data
)
1278 struct cmd_lookup
*match
= data
;
1280 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1282 list_del(&cmd
->list
);
1284 if (match
->sk
== NULL
) {
1285 match
->sk
= cmd
->sk
;
1286 sock_hold(match
->sk
);
1289 mgmt_pending_free(cmd
);
1292 static void cmd_status_rsp(struct pending_cmd
*cmd
, void *data
)
1296 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1297 mgmt_pending_remove(cmd
);
1300 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1302 if (!lmp_bredr_capable(hdev
))
1303 return MGMT_STATUS_NOT_SUPPORTED
;
1304 else if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1305 return MGMT_STATUS_REJECTED
;
1307 return MGMT_STATUS_SUCCESS
;
1310 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1312 if (!lmp_le_capable(hdev
))
1313 return MGMT_STATUS_NOT_SUPPORTED
;
1314 else if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
1315 return MGMT_STATUS_REJECTED
;
1317 return MGMT_STATUS_SUCCESS
;
1320 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1322 struct pending_cmd
*cmd
;
1323 struct mgmt_mode
*cp
;
1324 struct hci_request req
;
1327 BT_DBG("status 0x%02x", status
);
1331 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1336 u8 mgmt_err
= mgmt_status(status
);
1337 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1338 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1344 changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
1347 if (hdev
->discov_timeout
> 0) {
1348 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1349 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1353 changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1357 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1360 new_settings(hdev
, cmd
->sk
);
1362 /* When the discoverable mode gets changed, make sure
1363 * that class of device has the limited discoverable
1364 * bit correctly set.
1366 hci_req_init(&req
, hdev
);
1368 hci_req_run(&req
, NULL
);
1371 mgmt_pending_remove(cmd
);
1374 hci_dev_unlock(hdev
);
1377 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1380 struct mgmt_cp_set_discoverable
*cp
= data
;
1381 struct pending_cmd
*cmd
;
1382 struct hci_request req
;
1387 BT_DBG("request for %s", hdev
->name
);
1389 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1390 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1391 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1392 MGMT_STATUS_REJECTED
);
1394 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1395 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1396 MGMT_STATUS_INVALID_PARAMS
);
1398 timeout
= __le16_to_cpu(cp
->timeout
);
1400 /* Disabling discoverable requires that no timeout is set,
1401 * and enabling limited discoverable requires a timeout.
1403 if ((cp
->val
== 0x00 && timeout
> 0) ||
1404 (cp
->val
== 0x02 && timeout
== 0))
1405 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1406 MGMT_STATUS_INVALID_PARAMS
);
1410 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1411 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1412 MGMT_STATUS_NOT_POWERED
);
1416 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1417 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1418 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1423 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
)) {
1424 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1425 MGMT_STATUS_REJECTED
);
1429 if (!hdev_is_powered(hdev
)) {
1430 bool changed
= false;
1432 /* Setting limited discoverable when powered off is
1433 * not a valid operation since it requires a timeout
1434 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1436 if (!!cp
->val
!= test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
)) {
1437 change_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1441 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1446 err
= new_settings(hdev
, sk
);
1451 /* If the current mode is the same, then just update the timeout
1452 * value with the new value. And if only the timeout gets updated,
1453 * then no need for any HCI transactions.
1455 if (!!cp
->val
== test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
) &&
1456 (cp
->val
== 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE
,
1457 &hdev
->dev_flags
)) {
1458 cancel_delayed_work(&hdev
->discov_off
);
1459 hdev
->discov_timeout
= timeout
;
1461 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1462 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1463 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1467 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1471 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1477 /* Cancel any potential discoverable timeout that might be
1478 * still active and store new timeout value. The arming of
1479 * the timeout happens in the complete handler.
1481 cancel_delayed_work(&hdev
->discov_off
);
1482 hdev
->discov_timeout
= timeout
;
1484 /* Limited discoverable mode */
1485 if (cp
->val
== 0x02)
1486 set_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1488 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1490 hci_req_init(&req
, hdev
);
1492 /* The procedure for LE-only controllers is much simpler - just
1493 * update the advertising data.
1495 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1501 struct hci_cp_write_current_iac_lap hci_cp
;
1503 if (cp
->val
== 0x02) {
1504 /* Limited discoverable mode */
1505 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1506 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1507 hci_cp
.iac_lap
[1] = 0x8b;
1508 hci_cp
.iac_lap
[2] = 0x9e;
1509 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1510 hci_cp
.iac_lap
[4] = 0x8b;
1511 hci_cp
.iac_lap
[5] = 0x9e;
1513 /* General discoverable mode */
1515 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1516 hci_cp
.iac_lap
[1] = 0x8b;
1517 hci_cp
.iac_lap
[2] = 0x9e;
1520 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1521 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1523 scan
|= SCAN_INQUIRY
;
1525 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1528 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1531 update_adv_data(&req
);
1533 err
= hci_req_run(&req
, set_discoverable_complete
);
1535 mgmt_pending_remove(cmd
);
1538 hci_dev_unlock(hdev
);
1542 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1544 struct hci_dev
*hdev
= req
->hdev
;
1545 struct hci_cp_write_page_scan_activity acp
;
1548 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1551 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1555 type
= PAGE_SCAN_TYPE_INTERLACED
;
1557 /* 160 msec page scan interval */
1558 acp
.interval
= cpu_to_le16(0x0100);
1560 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1562 /* default 1.28 sec page scan */
1563 acp
.interval
= cpu_to_le16(0x0800);
1566 acp
.window
= cpu_to_le16(0x0012);
1568 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1569 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1570 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
1573 if (hdev
->page_scan_type
!= type
)
1574 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
1577 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1579 struct pending_cmd
*cmd
;
1580 struct mgmt_mode
*cp
;
1583 BT_DBG("status 0x%02x", status
);
1587 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1592 u8 mgmt_err
= mgmt_status(status
);
1593 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1599 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1601 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1603 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1606 new_settings(hdev
, cmd
->sk
);
1609 mgmt_pending_remove(cmd
);
1612 hci_dev_unlock(hdev
);
1615 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1616 struct sock
*sk
, u8 val
)
1618 bool changed
= false;
1621 if (!!val
!= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
1625 set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1627 clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1628 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1631 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1636 return new_settings(hdev
, sk
);
1641 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1644 struct mgmt_mode
*cp
= data
;
1645 struct pending_cmd
*cmd
;
1646 struct hci_request req
;
1650 BT_DBG("request for %s", hdev
->name
);
1652 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1653 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1654 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1655 MGMT_STATUS_REJECTED
);
1657 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1658 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1659 MGMT_STATUS_INVALID_PARAMS
);
1663 if (!hdev_is_powered(hdev
)) {
1664 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1668 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1669 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1670 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1675 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1681 hci_req_init(&req
, hdev
);
1683 /* If BR/EDR is not enabled and we disable advertising as a
1684 * by-product of disabling connectable, we need to update the
1685 * advertising flags.
1687 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1689 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1690 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1692 update_adv_data(&req
);
1693 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1699 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
1700 hdev
->discov_timeout
> 0)
1701 cancel_delayed_work(&hdev
->discov_off
);
1704 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1707 /* If we're going from non-connectable to connectable or
1708 * vice-versa when fast connectable is enabled ensure that fast
1709 * connectable gets disabled. write_fast_connectable won't do
1710 * anything if the page scan parameters are already what they
1713 if (cp
->val
|| test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
1714 write_fast_connectable(&req
, false);
1716 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) &&
1717 hci_conn_num(hdev
, LE_LINK
) == 0) {
1718 disable_advertising(&req
);
1719 enable_advertising(&req
);
1722 err
= hci_req_run(&req
, set_connectable_complete
);
1724 mgmt_pending_remove(cmd
);
1725 if (err
== -ENODATA
)
1726 err
= set_connectable_update_settings(hdev
, sk
,
1732 hci_dev_unlock(hdev
);
1736 static int set_pairable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1739 struct mgmt_mode
*cp
= data
;
1743 BT_DBG("request for %s", hdev
->name
);
1745 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1746 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PAIRABLE
,
1747 MGMT_STATUS_INVALID_PARAMS
);
1752 changed
= !test_and_set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1754 changed
= test_and_clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1756 err
= send_settings_rsp(sk
, MGMT_OP_SET_PAIRABLE
, hdev
);
1761 err
= new_settings(hdev
, sk
);
1764 hci_dev_unlock(hdev
);
1768 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1771 struct mgmt_mode
*cp
= data
;
1772 struct pending_cmd
*cmd
;
1776 BT_DBG("request for %s", hdev
->name
);
1778 status
= mgmt_bredr_support(hdev
);
1780 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1783 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1784 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1785 MGMT_STATUS_INVALID_PARAMS
);
1789 if (!hdev_is_powered(hdev
)) {
1790 bool changed
= false;
1792 if (!!cp
->val
!= test_bit(HCI_LINK_SECURITY
,
1793 &hdev
->dev_flags
)) {
1794 change_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
1798 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1803 err
= new_settings(hdev
, sk
);
1808 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1809 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1816 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1817 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1821 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1827 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1829 mgmt_pending_remove(cmd
);
1834 hci_dev_unlock(hdev
);
1838 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1840 struct mgmt_mode
*cp
= data
;
1841 struct pending_cmd
*cmd
;
1845 BT_DBG("request for %s", hdev
->name
);
1847 status
= mgmt_bredr_support(hdev
);
1849 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1851 if (!lmp_ssp_capable(hdev
))
1852 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1853 MGMT_STATUS_NOT_SUPPORTED
);
1855 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1856 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1857 MGMT_STATUS_INVALID_PARAMS
);
1861 if (!hdev_is_powered(hdev
)) {
1865 changed
= !test_and_set_bit(HCI_SSP_ENABLED
,
1868 changed
= test_and_clear_bit(HCI_SSP_ENABLED
,
1871 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
1874 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1877 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1882 err
= new_settings(hdev
, sk
);
1887 if (mgmt_pending_find(MGMT_OP_SET_SSP
, hdev
) ||
1888 mgmt_pending_find(MGMT_OP_SET_HS
, hdev
)) {
1889 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1894 if (!!cp
->val
== test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
1895 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1899 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
1905 if (!cp
->val
&& test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
1906 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
1907 sizeof(cp
->val
), &cp
->val
);
1909 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
1911 mgmt_pending_remove(cmd
);
1916 hci_dev_unlock(hdev
);
1920 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1922 struct mgmt_mode
*cp
= data
;
1927 BT_DBG("request for %s", hdev
->name
);
1929 status
= mgmt_bredr_support(hdev
);
1931 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
1933 if (!lmp_ssp_capable(hdev
))
1934 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1935 MGMT_STATUS_NOT_SUPPORTED
);
1937 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
1938 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1939 MGMT_STATUS_REJECTED
);
1941 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1942 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1943 MGMT_STATUS_INVALID_PARAMS
);
1948 changed
= !test_and_set_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1950 if (hdev_is_powered(hdev
)) {
1951 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1952 MGMT_STATUS_REJECTED
);
1956 changed
= test_and_clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1959 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
1964 err
= new_settings(hdev
, sk
);
1967 hci_dev_unlock(hdev
);
1971 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
)
1973 struct cmd_lookup match
= { NULL
, hdev
};
1976 u8 mgmt_err
= mgmt_status(status
);
1978 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
1983 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
1985 new_settings(hdev
, match
.sk
);
1990 /* Make sure the controller has a good default for
1991 * advertising data. Restrict the update to when LE
1992 * has actually been enabled. During power on, the
1993 * update in powered_update_hci will take care of it.
1995 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1996 struct hci_request req
;
2000 hci_req_init(&req
, hdev
);
2001 update_adv_data(&req
);
2002 update_scan_rsp_data(&req
);
2003 hci_req_run(&req
, NULL
);
2005 hci_dev_unlock(hdev
);
2009 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2011 struct mgmt_mode
*cp
= data
;
2012 struct hci_cp_write_le_host_supported hci_cp
;
2013 struct pending_cmd
*cmd
;
2014 struct hci_request req
;
2018 BT_DBG("request for %s", hdev
->name
);
2020 if (!lmp_le_capable(hdev
))
2021 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2022 MGMT_STATUS_NOT_SUPPORTED
);
2024 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2025 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2026 MGMT_STATUS_INVALID_PARAMS
);
2028 /* LE-only devices do not allow toggling LE on/off */
2029 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
2030 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2031 MGMT_STATUS_REJECTED
);
2036 enabled
= lmp_host_le_capable(hdev
);
2038 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2039 bool changed
= false;
2041 if (val
!= test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2042 change_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
2046 if (!val
&& test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
2047 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
2051 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2056 err
= new_settings(hdev
, sk
);
2061 if (mgmt_pending_find(MGMT_OP_SET_LE
, hdev
) ||
2062 mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2063 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2068 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2074 hci_req_init(&req
, hdev
);
2076 memset(&hci_cp
, 0, sizeof(hci_cp
));
2080 hci_cp
.simul
= lmp_le_br_capable(hdev
);
2082 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
2083 disable_advertising(&req
);
2086 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2089 err
= hci_req_run(&req
, le_enable_complete
);
2091 mgmt_pending_remove(cmd
);
2094 hci_dev_unlock(hdev
);
2098 /* This is a helper function to test for pending mgmt commands that can
2099 * cause CoD or EIR HCI commands. We can only allow one such pending
2100 * mgmt command at a time since otherwise we cannot easily track what
2101 * the current values are, will be, and based on that calculate if a new
2102 * HCI command needs to be sent and if yes with what value.
2104 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2106 struct pending_cmd
*cmd
;
2108 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2109 switch (cmd
->opcode
) {
2110 case MGMT_OP_ADD_UUID
:
2111 case MGMT_OP_REMOVE_UUID
:
2112 case MGMT_OP_SET_DEV_CLASS
:
2113 case MGMT_OP_SET_POWERED
:
2121 static const u8 bluetooth_base_uuid
[] = {
2122 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2123 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2126 static u8
get_uuid_size(const u8
*uuid
)
2130 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2133 val
= get_unaligned_le32(&uuid
[12]);
2140 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2142 struct pending_cmd
*cmd
;
2146 cmd
= mgmt_pending_find(mgmt_op
, hdev
);
2150 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
2151 hdev
->dev_class
, 3);
2153 mgmt_pending_remove(cmd
);
2156 hci_dev_unlock(hdev
);
2159 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2161 BT_DBG("status 0x%02x", status
);
2163 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2166 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2168 struct mgmt_cp_add_uuid
*cp
= data
;
2169 struct pending_cmd
*cmd
;
2170 struct hci_request req
;
2171 struct bt_uuid
*uuid
;
2174 BT_DBG("request for %s", hdev
->name
);
2178 if (pending_eir_or_class(hdev
)) {
2179 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2184 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2190 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2191 uuid
->svc_hint
= cp
->svc_hint
;
2192 uuid
->size
= get_uuid_size(cp
->uuid
);
2194 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2196 hci_req_init(&req
, hdev
);
2201 err
= hci_req_run(&req
, add_uuid_complete
);
2203 if (err
!= -ENODATA
)
2206 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2207 hdev
->dev_class
, 3);
2211 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2220 hci_dev_unlock(hdev
);
2224 static bool enable_service_cache(struct hci_dev
*hdev
)
2226 if (!hdev_is_powered(hdev
))
2229 if (!test_and_set_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2230 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2238 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2240 BT_DBG("status 0x%02x", status
);
2242 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2245 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2248 struct mgmt_cp_remove_uuid
*cp
= data
;
2249 struct pending_cmd
*cmd
;
2250 struct bt_uuid
*match
, *tmp
;
2251 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2252 struct hci_request req
;
2255 BT_DBG("request for %s", hdev
->name
);
2259 if (pending_eir_or_class(hdev
)) {
2260 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2265 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2266 hci_uuids_clear(hdev
);
2268 if (enable_service_cache(hdev
)) {
2269 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2270 0, hdev
->dev_class
, 3);
2279 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2280 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2283 list_del(&match
->list
);
2289 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2290 MGMT_STATUS_INVALID_PARAMS
);
2295 hci_req_init(&req
, hdev
);
2300 err
= hci_req_run(&req
, remove_uuid_complete
);
2302 if (err
!= -ENODATA
)
2305 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2306 hdev
->dev_class
, 3);
2310 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2319 hci_dev_unlock(hdev
);
2323 static void set_class_complete(struct hci_dev
*hdev
, u8 status
)
2325 BT_DBG("status 0x%02x", status
);
2327 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2330 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2333 struct mgmt_cp_set_dev_class
*cp
= data
;
2334 struct pending_cmd
*cmd
;
2335 struct hci_request req
;
2338 BT_DBG("request for %s", hdev
->name
);
2340 if (!lmp_bredr_capable(hdev
))
2341 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2342 MGMT_STATUS_NOT_SUPPORTED
);
2346 if (pending_eir_or_class(hdev
)) {
2347 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2352 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2353 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2354 MGMT_STATUS_INVALID_PARAMS
);
2358 hdev
->major_class
= cp
->major
;
2359 hdev
->minor_class
= cp
->minor
;
2361 if (!hdev_is_powered(hdev
)) {
2362 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2363 hdev
->dev_class
, 3);
2367 hci_req_init(&req
, hdev
);
2369 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2370 hci_dev_unlock(hdev
);
2371 cancel_delayed_work_sync(&hdev
->service_cache
);
2378 err
= hci_req_run(&req
, set_class_complete
);
2380 if (err
!= -ENODATA
)
2383 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2384 hdev
->dev_class
, 3);
2388 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2397 hci_dev_unlock(hdev
);
2401 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2404 struct mgmt_cp_load_link_keys
*cp
= data
;
2405 u16 key_count
, expected_len
;
2409 BT_DBG("request for %s", hdev
->name
);
2411 if (!lmp_bredr_capable(hdev
))
2412 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2413 MGMT_STATUS_NOT_SUPPORTED
);
2415 key_count
= __le16_to_cpu(cp
->key_count
);
2417 expected_len
= sizeof(*cp
) + key_count
*
2418 sizeof(struct mgmt_link_key_info
);
2419 if (expected_len
!= len
) {
2420 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2422 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2423 MGMT_STATUS_INVALID_PARAMS
);
2426 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2427 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2428 MGMT_STATUS_INVALID_PARAMS
);
2430 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2433 for (i
= 0; i
< key_count
; i
++) {
2434 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2436 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2437 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2438 MGMT_STATUS_INVALID_PARAMS
);
2443 hci_link_keys_clear(hdev
);
2446 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
2449 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
2453 new_settings(hdev
, NULL
);
2455 for (i
= 0; i
< key_count
; i
++) {
2456 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2458 /* Always ignore debug keys and require a new pairing if
2459 * the user wants to use them.
2461 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2464 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2465 key
->type
, key
->pin_len
, NULL
);
2468 cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2470 hci_dev_unlock(hdev
);
2475 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2476 u8 addr_type
, struct sock
*skip_sk
)
2478 struct mgmt_ev_device_unpaired ev
;
2480 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2481 ev
.addr
.type
= addr_type
;
2483 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2487 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2490 struct mgmt_cp_unpair_device
*cp
= data
;
2491 struct mgmt_rp_unpair_device rp
;
2492 struct hci_cp_disconnect dc
;
2493 struct pending_cmd
*cmd
;
2494 struct hci_conn
*conn
;
2497 memset(&rp
, 0, sizeof(rp
));
2498 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2499 rp
.addr
.type
= cp
->addr
.type
;
2501 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2502 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2503 MGMT_STATUS_INVALID_PARAMS
,
2506 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2507 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2508 MGMT_STATUS_INVALID_PARAMS
,
2513 if (!hdev_is_powered(hdev
)) {
2514 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2515 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2519 if (cp
->addr
.type
== BDADDR_BREDR
) {
2520 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2524 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2525 addr_type
= ADDR_LE_DEV_PUBLIC
;
2527 addr_type
= ADDR_LE_DEV_RANDOM
;
2529 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2531 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2533 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2537 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2538 MGMT_STATUS_NOT_PAIRED
, &rp
, sizeof(rp
));
2542 if (cp
->disconnect
) {
2543 if (cp
->addr
.type
== BDADDR_BREDR
)
2544 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2547 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
2554 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2556 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2560 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2567 dc
.handle
= cpu_to_le16(conn
->handle
);
2568 dc
.reason
= 0x13; /* Remote User Terminated Connection */
2569 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2571 mgmt_pending_remove(cmd
);
2574 hci_dev_unlock(hdev
);
2578 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2581 struct mgmt_cp_disconnect
*cp
= data
;
2582 struct mgmt_rp_disconnect rp
;
2583 struct hci_cp_disconnect dc
;
2584 struct pending_cmd
*cmd
;
2585 struct hci_conn
*conn
;
2590 memset(&rp
, 0, sizeof(rp
));
2591 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2592 rp
.addr
.type
= cp
->addr
.type
;
2594 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2595 return cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2596 MGMT_STATUS_INVALID_PARAMS
,
2601 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2602 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2603 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2607 if (mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2608 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2609 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2613 if (cp
->addr
.type
== BDADDR_BREDR
)
2614 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2617 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
2619 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2620 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2621 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
2625 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2631 dc
.handle
= cpu_to_le16(conn
->handle
);
2632 dc
.reason
= HCI_ERROR_REMOTE_USER_TERM
;
2634 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2636 mgmt_pending_remove(cmd
);
2639 hci_dev_unlock(hdev
);
2643 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2645 switch (link_type
) {
2647 switch (addr_type
) {
2648 case ADDR_LE_DEV_PUBLIC
:
2649 return BDADDR_LE_PUBLIC
;
2652 /* Fallback to LE Random address type */
2653 return BDADDR_LE_RANDOM
;
2657 /* Fallback to BR/EDR type */
2658 return BDADDR_BREDR
;
2662 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2665 struct mgmt_rp_get_connections
*rp
;
2675 if (!hdev_is_powered(hdev
)) {
2676 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2677 MGMT_STATUS_NOT_POWERED
);
2682 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2683 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2687 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2688 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2695 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2696 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2698 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2699 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2700 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2705 rp
->conn_count
= cpu_to_le16(i
);
2707 /* Recalculate length in case of filtered SCO connections, etc */
2708 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2710 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2716 hci_dev_unlock(hdev
);
2720 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2721 struct mgmt_cp_pin_code_neg_reply
*cp
)
2723 struct pending_cmd
*cmd
;
2726 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2731 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2732 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2734 mgmt_pending_remove(cmd
);
2739 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2742 struct hci_conn
*conn
;
2743 struct mgmt_cp_pin_code_reply
*cp
= data
;
2744 struct hci_cp_pin_code_reply reply
;
2745 struct pending_cmd
*cmd
;
2752 if (!hdev_is_powered(hdev
)) {
2753 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2754 MGMT_STATUS_NOT_POWERED
);
2758 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2760 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2761 MGMT_STATUS_NOT_CONNECTED
);
2765 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2766 struct mgmt_cp_pin_code_neg_reply ncp
;
2768 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2770 BT_ERR("PIN code is not 16 bytes long");
2772 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2774 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2775 MGMT_STATUS_INVALID_PARAMS
);
2780 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2786 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2787 reply
.pin_len
= cp
->pin_len
;
2788 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2790 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2792 mgmt_pending_remove(cmd
);
2795 hci_dev_unlock(hdev
);
2799 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2802 struct mgmt_cp_set_io_capability
*cp
= data
;
2806 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
2807 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
2808 MGMT_STATUS_INVALID_PARAMS
, NULL
, 0);
2812 hdev
->io_capability
= cp
->io_capability
;
2814 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
2815 hdev
->io_capability
);
2817 hci_dev_unlock(hdev
);
2819 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0, NULL
,
2823 static struct pending_cmd
*find_pairing(struct hci_conn
*conn
)
2825 struct hci_dev
*hdev
= conn
->hdev
;
2826 struct pending_cmd
*cmd
;
2828 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2829 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
2832 if (cmd
->user_data
!= conn
)
2841 static void pairing_complete(struct pending_cmd
*cmd
, u8 status
)
2843 struct mgmt_rp_pair_device rp
;
2844 struct hci_conn
*conn
= cmd
->user_data
;
2846 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2847 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
2849 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
, status
,
2852 /* So we don't get further callbacks for this connection */
2853 conn
->connect_cfm_cb
= NULL
;
2854 conn
->security_cfm_cb
= NULL
;
2855 conn
->disconn_cfm_cb
= NULL
;
2857 hci_conn_drop(conn
);
2859 mgmt_pending_remove(cmd
);
2862 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
2864 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
2865 struct pending_cmd
*cmd
;
2867 cmd
= find_pairing(conn
);
2869 pairing_complete(cmd
, status
);
2872 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2874 struct pending_cmd
*cmd
;
2876 BT_DBG("status %u", status
);
2878 cmd
= find_pairing(conn
);
2880 BT_DBG("Unable to find a pending command");
2882 pairing_complete(cmd
, mgmt_status(status
));
2885 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2887 struct pending_cmd
*cmd
;
2889 BT_DBG("status %u", status
);
2894 cmd
= find_pairing(conn
);
2896 BT_DBG("Unable to find a pending command");
2898 pairing_complete(cmd
, mgmt_status(status
));
2901 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2904 struct mgmt_cp_pair_device
*cp
= data
;
2905 struct mgmt_rp_pair_device rp
;
2906 struct pending_cmd
*cmd
;
2907 u8 sec_level
, auth_type
;
2908 struct hci_conn
*conn
;
2913 memset(&rp
, 0, sizeof(rp
));
2914 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2915 rp
.addr
.type
= cp
->addr
.type
;
2917 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2918 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2919 MGMT_STATUS_INVALID_PARAMS
,
2922 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
2923 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2924 MGMT_STATUS_INVALID_PARAMS
,
2929 if (!hdev_is_powered(hdev
)) {
2930 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2931 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2935 sec_level
= BT_SECURITY_MEDIUM
;
2936 auth_type
= HCI_AT_DEDICATED_BONDING
;
2938 if (cp
->addr
.type
== BDADDR_BREDR
) {
2939 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
2944 /* Convert from L2CAP channel address type to HCI address type
2946 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2947 addr_type
= ADDR_LE_DEV_PUBLIC
;
2949 addr_type
= ADDR_LE_DEV_RANDOM
;
2951 /* When pairing a new device, it is expected to remember
2952 * this device for future connections. Adding the connection
2953 * parameter information ahead of time allows tracking
2954 * of the slave preferred values and will speed up any
2955 * further connection establishment.
2957 * If connection parameters already exist, then they
2958 * will be kept and this function does nothing.
2960 hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2962 conn
= hci_connect_le(hdev
, &cp
->addr
.bdaddr
, addr_type
,
2963 sec_level
, auth_type
);
2969 if (PTR_ERR(conn
) == -EBUSY
)
2970 status
= MGMT_STATUS_BUSY
;
2972 status
= MGMT_STATUS_CONNECT_FAILED
;
2974 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2980 if (conn
->connect_cfm_cb
) {
2981 hci_conn_drop(conn
);
2982 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2983 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2987 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
2990 hci_conn_drop(conn
);
2994 /* For LE, just connecting isn't a proof that the pairing finished */
2995 if (cp
->addr
.type
== BDADDR_BREDR
) {
2996 conn
->connect_cfm_cb
= pairing_complete_cb
;
2997 conn
->security_cfm_cb
= pairing_complete_cb
;
2998 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3000 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3001 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3002 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3005 conn
->io_capability
= cp
->io_cap
;
3006 cmd
->user_data
= conn
;
3008 if (conn
->state
== BT_CONNECTED
&&
3009 hci_conn_security(conn
, sec_level
, auth_type
))
3010 pairing_complete(cmd
, 0);
3015 hci_dev_unlock(hdev
);
3019 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3022 struct mgmt_addr_info
*addr
= data
;
3023 struct pending_cmd
*cmd
;
3024 struct hci_conn
*conn
;
3031 if (!hdev_is_powered(hdev
)) {
3032 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3033 MGMT_STATUS_NOT_POWERED
);
3037 cmd
= mgmt_pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3039 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3040 MGMT_STATUS_INVALID_PARAMS
);
3044 conn
= cmd
->user_data
;
3046 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3047 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3048 MGMT_STATUS_INVALID_PARAMS
);
3052 pairing_complete(cmd
, MGMT_STATUS_CANCELLED
);
3054 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3055 addr
, sizeof(*addr
));
3057 hci_dev_unlock(hdev
);
3061 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3062 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3063 u16 hci_op
, __le32 passkey
)
3065 struct pending_cmd
*cmd
;
3066 struct hci_conn
*conn
;
3071 if (!hdev_is_powered(hdev
)) {
3072 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3073 MGMT_STATUS_NOT_POWERED
, addr
,
3078 if (addr
->type
== BDADDR_BREDR
)
3079 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3081 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
3084 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3085 MGMT_STATUS_NOT_CONNECTED
, addr
,
3090 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3091 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3093 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3094 MGMT_STATUS_SUCCESS
, addr
,
3097 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3098 MGMT_STATUS_FAILED
, addr
,
3104 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3110 /* Continue with pairing via HCI */
3111 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3112 struct hci_cp_user_passkey_reply cp
;
3114 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3115 cp
.passkey
= passkey
;
3116 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3118 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3122 mgmt_pending_remove(cmd
);
3125 hci_dev_unlock(hdev
);
3129 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3130 void *data
, u16 len
)
3132 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3136 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3137 MGMT_OP_PIN_CODE_NEG_REPLY
,
3138 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3141 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3144 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3148 if (len
!= sizeof(*cp
))
3149 return cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3150 MGMT_STATUS_INVALID_PARAMS
);
3152 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3153 MGMT_OP_USER_CONFIRM_REPLY
,
3154 HCI_OP_USER_CONFIRM_REPLY
, 0);
3157 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3158 void *data
, u16 len
)
3160 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3164 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3165 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3166 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3169 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3172 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3176 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3177 MGMT_OP_USER_PASSKEY_REPLY
,
3178 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3181 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3182 void *data
, u16 len
)
3184 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3188 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3189 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3190 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3193 static void update_name(struct hci_request
*req
)
3195 struct hci_dev
*hdev
= req
->hdev
;
3196 struct hci_cp_write_local_name cp
;
3198 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3200 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3203 static void set_name_complete(struct hci_dev
*hdev
, u8 status
)
3205 struct mgmt_cp_set_local_name
*cp
;
3206 struct pending_cmd
*cmd
;
3208 BT_DBG("status 0x%02x", status
);
3212 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3219 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3220 mgmt_status(status
));
3222 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3225 mgmt_pending_remove(cmd
);
3228 hci_dev_unlock(hdev
);
3231 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3234 struct mgmt_cp_set_local_name
*cp
= data
;
3235 struct pending_cmd
*cmd
;
3236 struct hci_request req
;
3243 /* If the old values are the same as the new ones just return a
3244 * direct command complete event.
3246 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3247 !memcmp(hdev
->short_name
, cp
->short_name
,
3248 sizeof(hdev
->short_name
))) {
3249 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3254 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3256 if (!hdev_is_powered(hdev
)) {
3257 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3259 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3264 err
= mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
, len
,
3270 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3276 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3278 hci_req_init(&req
, hdev
);
3280 if (lmp_bredr_capable(hdev
)) {
3285 /* The name is stored in the scan response data and so
3286 * no need to udpate the advertising data here.
3288 if (lmp_le_capable(hdev
))
3289 update_scan_rsp_data(&req
);
3291 err
= hci_req_run(&req
, set_name_complete
);
3293 mgmt_pending_remove(cmd
);
3296 hci_dev_unlock(hdev
);
3300 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3301 void *data
, u16 data_len
)
3303 struct pending_cmd
*cmd
;
3306 BT_DBG("%s", hdev
->name
);
3310 if (!hdev_is_powered(hdev
)) {
3311 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3312 MGMT_STATUS_NOT_POWERED
);
3316 if (!lmp_ssp_capable(hdev
)) {
3317 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3318 MGMT_STATUS_NOT_SUPPORTED
);
3322 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3323 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3328 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3334 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
3335 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
,
3338 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3341 mgmt_pending_remove(cmd
);
3344 hci_dev_unlock(hdev
);
3348 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3349 void *data
, u16 len
)
3353 BT_DBG("%s ", hdev
->name
);
3357 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3358 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3361 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3362 cp
->hash
, cp
->randomizer
);
3364 status
= MGMT_STATUS_FAILED
;
3366 status
= MGMT_STATUS_SUCCESS
;
3368 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3369 status
, &cp
->addr
, sizeof(cp
->addr
));
3370 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3371 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3374 err
= hci_add_remote_oob_ext_data(hdev
, &cp
->addr
.bdaddr
,
3380 status
= MGMT_STATUS_FAILED
;
3382 status
= MGMT_STATUS_SUCCESS
;
3384 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3385 status
, &cp
->addr
, sizeof(cp
->addr
));
3387 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3388 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3389 MGMT_STATUS_INVALID_PARAMS
);
3392 hci_dev_unlock(hdev
);
3396 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3397 void *data
, u16 len
)
3399 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3403 BT_DBG("%s", hdev
->name
);
3407 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
);
3409 status
= MGMT_STATUS_INVALID_PARAMS
;
3411 status
= MGMT_STATUS_SUCCESS
;
3413 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3414 status
, &cp
->addr
, sizeof(cp
->addr
));
3416 hci_dev_unlock(hdev
);
3420 static int mgmt_start_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3422 struct pending_cmd
*cmd
;
3426 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3428 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3432 type
= hdev
->discovery
.type
;
3434 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3435 &type
, sizeof(type
));
3436 mgmt_pending_remove(cmd
);
3441 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3443 unsigned long timeout
= 0;
3445 BT_DBG("status %d", status
);
3449 mgmt_start_discovery_failed(hdev
, status
);
3450 hci_dev_unlock(hdev
);
3455 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
3456 hci_dev_unlock(hdev
);
3458 switch (hdev
->discovery
.type
) {
3459 case DISCOV_TYPE_LE
:
3460 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
3463 case DISCOV_TYPE_INTERLEAVED
:
3464 timeout
= msecs_to_jiffies(hdev
->discov_interleaved_timeout
);
3467 case DISCOV_TYPE_BREDR
:
3471 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
3477 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
, timeout
);
3480 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3481 void *data
, u16 len
)
3483 struct mgmt_cp_start_discovery
*cp
= data
;
3484 struct pending_cmd
*cmd
;
3485 struct hci_cp_le_set_scan_param param_cp
;
3486 struct hci_cp_le_set_scan_enable enable_cp
;
3487 struct hci_cp_inquiry inq_cp
;
3488 struct hci_request req
;
3489 /* General inquiry access code (GIAC) */
3490 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3491 u8 status
, own_addr_type
;
3494 BT_DBG("%s", hdev
->name
);
3498 if (!hdev_is_powered(hdev
)) {
3499 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3500 MGMT_STATUS_NOT_POWERED
);
3504 if (test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3505 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3510 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
) {
3511 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3516 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, NULL
, 0);
3522 hdev
->discovery
.type
= cp
->type
;
3524 hci_req_init(&req
, hdev
);
3526 switch (hdev
->discovery
.type
) {
3527 case DISCOV_TYPE_BREDR
:
3528 status
= mgmt_bredr_support(hdev
);
3530 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3532 mgmt_pending_remove(cmd
);
3536 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3537 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3539 mgmt_pending_remove(cmd
);
3543 hci_inquiry_cache_flush(hdev
);
3545 memset(&inq_cp
, 0, sizeof(inq_cp
));
3546 memcpy(&inq_cp
.lap
, lap
, sizeof(inq_cp
.lap
));
3547 inq_cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
3548 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(inq_cp
), &inq_cp
);
3551 case DISCOV_TYPE_LE
:
3552 case DISCOV_TYPE_INTERLEAVED
:
3553 status
= mgmt_le_support(hdev
);
3555 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3557 mgmt_pending_remove(cmd
);
3561 if (hdev
->discovery
.type
== DISCOV_TYPE_INTERLEAVED
&&
3562 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
3563 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3564 MGMT_STATUS_NOT_SUPPORTED
);
3565 mgmt_pending_remove(cmd
);
3569 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3570 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3571 MGMT_STATUS_REJECTED
);
3572 mgmt_pending_remove(cmd
);
3576 /* If controller is scanning, it means the background scanning
3577 * is running. Thus, we should temporarily stop it in order to
3578 * set the discovery scanning parameters.
3580 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
3581 hci_req_add_le_scan_disable(&req
);
3583 memset(¶m_cp
, 0, sizeof(param_cp
));
3585 /* All active scans will be done with either a resolvable
3586 * private address (when privacy feature has been enabled)
3587 * or unresolvable private address.
3589 err
= hci_update_random_address(&req
, true, &own_addr_type
);
3591 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3592 MGMT_STATUS_FAILED
);
3593 mgmt_pending_remove(cmd
);
3597 param_cp
.type
= LE_SCAN_ACTIVE
;
3598 param_cp
.interval
= cpu_to_le16(DISCOV_LE_SCAN_INT
);
3599 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
3600 param_cp
.own_address_type
= own_addr_type
;
3601 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
3604 memset(&enable_cp
, 0, sizeof(enable_cp
));
3605 enable_cp
.enable
= LE_SCAN_ENABLE
;
3606 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3607 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
3612 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3613 MGMT_STATUS_INVALID_PARAMS
);
3614 mgmt_pending_remove(cmd
);
3618 err
= hci_req_run(&req
, start_discovery_complete
);
3620 mgmt_pending_remove(cmd
);
3622 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3625 hci_dev_unlock(hdev
);
3629 static int mgmt_stop_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3631 struct pending_cmd
*cmd
;
3634 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
3638 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3639 &hdev
->discovery
.type
, sizeof(hdev
->discovery
.type
));
3640 mgmt_pending_remove(cmd
);
3645 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3647 BT_DBG("status %d", status
);
3652 mgmt_stop_discovery_failed(hdev
, status
);
3656 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3659 hci_dev_unlock(hdev
);
3662 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3665 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
3666 struct pending_cmd
*cmd
;
3667 struct hci_request req
;
3670 BT_DBG("%s", hdev
->name
);
3674 if (!hci_discovery_active(hdev
)) {
3675 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3676 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
3677 sizeof(mgmt_cp
->type
));
3681 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
3682 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3683 MGMT_STATUS_INVALID_PARAMS
, &mgmt_cp
->type
,
3684 sizeof(mgmt_cp
->type
));
3688 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, NULL
, 0);
3694 hci_req_init(&req
, hdev
);
3696 hci_stop_discovery(&req
);
3698 err
= hci_req_run(&req
, stop_discovery_complete
);
3700 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
3704 mgmt_pending_remove(cmd
);
3706 /* If no HCI commands were sent we're done */
3707 if (err
== -ENODATA
) {
3708 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
3709 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
3710 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3714 hci_dev_unlock(hdev
);
3718 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3721 struct mgmt_cp_confirm_name
*cp
= data
;
3722 struct inquiry_entry
*e
;
3725 BT_DBG("%s", hdev
->name
);
3729 if (!hci_discovery_active(hdev
)) {
3730 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3731 MGMT_STATUS_FAILED
, &cp
->addr
,
3736 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
3738 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3739 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
3744 if (cp
->name_known
) {
3745 e
->name_state
= NAME_KNOWN
;
3748 e
->name_state
= NAME_NEEDED
;
3749 hci_inquiry_cache_update_resolve(hdev
, e
);
3752 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0, &cp
->addr
,
3756 hci_dev_unlock(hdev
);
3760 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3763 struct mgmt_cp_block_device
*cp
= data
;
3767 BT_DBG("%s", hdev
->name
);
3769 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3770 return cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
3771 MGMT_STATUS_INVALID_PARAMS
,
3772 &cp
->addr
, sizeof(cp
->addr
));
3776 err
= hci_blacklist_add(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3778 status
= MGMT_STATUS_FAILED
;
3782 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3784 status
= MGMT_STATUS_SUCCESS
;
3787 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
3788 &cp
->addr
, sizeof(cp
->addr
));
3790 hci_dev_unlock(hdev
);
3795 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3798 struct mgmt_cp_unblock_device
*cp
= data
;
3802 BT_DBG("%s", hdev
->name
);
3804 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3805 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
3806 MGMT_STATUS_INVALID_PARAMS
,
3807 &cp
->addr
, sizeof(cp
->addr
));
3811 err
= hci_blacklist_del(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3813 status
= MGMT_STATUS_INVALID_PARAMS
;
3817 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3819 status
= MGMT_STATUS_SUCCESS
;
3822 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
3823 &cp
->addr
, sizeof(cp
->addr
));
3825 hci_dev_unlock(hdev
);
3830 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3833 struct mgmt_cp_set_device_id
*cp
= data
;
3834 struct hci_request req
;
3838 BT_DBG("%s", hdev
->name
);
3840 source
= __le16_to_cpu(cp
->source
);
3842 if (source
> 0x0002)
3843 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
3844 MGMT_STATUS_INVALID_PARAMS
);
3848 hdev
->devid_source
= source
;
3849 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
3850 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
3851 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
3853 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0, NULL
, 0);
3855 hci_req_init(&req
, hdev
);
3857 hci_req_run(&req
, NULL
);
3859 hci_dev_unlock(hdev
);
3864 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
)
3866 struct cmd_lookup match
= { NULL
, hdev
};
3869 u8 mgmt_err
= mgmt_status(status
);
3871 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
3872 cmd_status_rsp
, &mgmt_err
);
3876 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
3879 new_settings(hdev
, match
.sk
);
3885 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3888 struct mgmt_mode
*cp
= data
;
3889 struct pending_cmd
*cmd
;
3890 struct hci_request req
;
3891 u8 val
, enabled
, status
;
3894 BT_DBG("request for %s", hdev
->name
);
3896 status
= mgmt_le_support(hdev
);
3898 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3901 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
3902 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3903 MGMT_STATUS_INVALID_PARAMS
);
3908 enabled
= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3910 /* The following conditions are ones which mean that we should
3911 * not do any HCI communication but directly send a mgmt
3912 * response to user space (after toggling the flag if
3915 if (!hdev_is_powered(hdev
) || val
== enabled
||
3916 hci_conn_num(hdev
, LE_LINK
) > 0) {
3917 bool changed
= false;
3919 if (val
!= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3920 change_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3924 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
3929 err
= new_settings(hdev
, sk
);
3934 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
3935 mgmt_pending_find(MGMT_OP_SET_LE
, hdev
)) {
3936 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3941 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
3947 hci_req_init(&req
, hdev
);
3950 enable_advertising(&req
);
3952 disable_advertising(&req
);
3954 err
= hci_req_run(&req
, set_advertising_complete
);
3956 mgmt_pending_remove(cmd
);
3959 hci_dev_unlock(hdev
);
3963 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
3964 void *data
, u16 len
)
3966 struct mgmt_cp_set_static_address
*cp
= data
;
3969 BT_DBG("%s", hdev
->name
);
3971 if (!lmp_le_capable(hdev
))
3972 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3973 MGMT_STATUS_NOT_SUPPORTED
);
3975 if (hdev_is_powered(hdev
))
3976 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3977 MGMT_STATUS_REJECTED
);
3979 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
3980 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
3981 return cmd_status(sk
, hdev
->id
,
3982 MGMT_OP_SET_STATIC_ADDRESS
,
3983 MGMT_STATUS_INVALID_PARAMS
);
3985 /* Two most significant bits shall be set */
3986 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
3987 return cmd_status(sk
, hdev
->id
,
3988 MGMT_OP_SET_STATIC_ADDRESS
,
3989 MGMT_STATUS_INVALID_PARAMS
);
3994 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
3996 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
, 0, NULL
, 0);
3998 hci_dev_unlock(hdev
);
4003 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4004 void *data
, u16 len
)
4006 struct mgmt_cp_set_scan_params
*cp
= data
;
4007 __u16 interval
, window
;
4010 BT_DBG("%s", hdev
->name
);
4012 if (!lmp_le_capable(hdev
))
4013 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4014 MGMT_STATUS_NOT_SUPPORTED
);
4016 interval
= __le16_to_cpu(cp
->interval
);
4018 if (interval
< 0x0004 || interval
> 0x4000)
4019 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4020 MGMT_STATUS_INVALID_PARAMS
);
4022 window
= __le16_to_cpu(cp
->window
);
4024 if (window
< 0x0004 || window
> 0x4000)
4025 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4026 MGMT_STATUS_INVALID_PARAMS
);
4028 if (window
> interval
)
4029 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4030 MGMT_STATUS_INVALID_PARAMS
);
4034 hdev
->le_scan_interval
= interval
;
4035 hdev
->le_scan_window
= window
;
4037 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0, NULL
, 0);
4039 /* If background scan is running, restart it so new parameters are
4042 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
) &&
4043 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4044 struct hci_request req
;
4046 hci_req_init(&req
, hdev
);
4048 hci_req_add_le_scan_disable(&req
);
4049 hci_req_add_le_passive_scan(&req
);
4051 hci_req_run(&req
, NULL
);
4054 hci_dev_unlock(hdev
);
4059 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
)
4061 struct pending_cmd
*cmd
;
4063 BT_DBG("status 0x%02x", status
);
4067 cmd
= mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4072 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4073 mgmt_status(status
));
4075 struct mgmt_mode
*cp
= cmd
->param
;
4078 set_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4080 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4082 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4083 new_settings(hdev
, cmd
->sk
);
4086 mgmt_pending_remove(cmd
);
4089 hci_dev_unlock(hdev
);
4092 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4093 void *data
, u16 len
)
4095 struct mgmt_mode
*cp
= data
;
4096 struct pending_cmd
*cmd
;
4097 struct hci_request req
;
4100 BT_DBG("%s", hdev
->name
);
4102 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
) ||
4103 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4104 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4105 MGMT_STATUS_NOT_SUPPORTED
);
4107 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4108 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4109 MGMT_STATUS_INVALID_PARAMS
);
4111 if (!hdev_is_powered(hdev
))
4112 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4113 MGMT_STATUS_NOT_POWERED
);
4115 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4116 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4117 MGMT_STATUS_REJECTED
);
4121 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4122 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4127 if (!!cp
->val
== test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
)) {
4128 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4133 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4140 hci_req_init(&req
, hdev
);
4142 write_fast_connectable(&req
, cp
->val
);
4144 err
= hci_req_run(&req
, fast_connectable_complete
);
4146 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4147 MGMT_STATUS_FAILED
);
4148 mgmt_pending_remove(cmd
);
4152 hci_dev_unlock(hdev
);
4157 static void set_bredr_scan(struct hci_request
*req
)
4159 struct hci_dev
*hdev
= req
->hdev
;
4162 /* Ensure that fast connectable is disabled. This function will
4163 * not do anything if the page scan parameters are already what
4166 write_fast_connectable(req
, false);
4168 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4170 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
4171 scan
|= SCAN_INQUIRY
;
4174 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
4177 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
)
4179 struct pending_cmd
*cmd
;
4181 BT_DBG("status 0x%02x", status
);
4185 cmd
= mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
);
4190 u8 mgmt_err
= mgmt_status(status
);
4192 /* We need to restore the flag if related HCI commands
4195 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4197 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4199 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4200 new_settings(hdev
, cmd
->sk
);
4203 mgmt_pending_remove(cmd
);
4206 hci_dev_unlock(hdev
);
4209 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4211 struct mgmt_mode
*cp
= data
;
4212 struct pending_cmd
*cmd
;
4213 struct hci_request req
;
4216 BT_DBG("request for %s", hdev
->name
);
4218 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4219 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4220 MGMT_STATUS_NOT_SUPPORTED
);
4222 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
4223 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4224 MGMT_STATUS_REJECTED
);
4226 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4227 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4228 MGMT_STATUS_INVALID_PARAMS
);
4232 if (cp
->val
== test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4233 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4237 if (!hdev_is_powered(hdev
)) {
4239 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4240 clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
4241 clear_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4242 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4243 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
4246 change_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4248 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4252 err
= new_settings(hdev
, sk
);
4256 /* Reject disabling when powered on */
4258 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4259 MGMT_STATUS_REJECTED
);
4263 if (mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4264 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4269 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4275 /* We need to flip the bit already here so that update_adv_data
4276 * generates the correct flags.
4278 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4280 hci_req_init(&req
, hdev
);
4282 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4283 set_bredr_scan(&req
);
4285 /* Since only the advertising data flags will change, there
4286 * is no need to update the scan response data.
4288 update_adv_data(&req
);
4290 err
= hci_req_run(&req
, set_bredr_complete
);
4292 mgmt_pending_remove(cmd
);
4295 hci_dev_unlock(hdev
);
4299 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4300 void *data
, u16 len
)
4302 struct mgmt_mode
*cp
= data
;
4303 struct pending_cmd
*cmd
;
4307 BT_DBG("request for %s", hdev
->name
);
4309 status
= mgmt_bredr_support(hdev
);
4311 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4314 if (!lmp_sc_capable(hdev
) &&
4315 !test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
4316 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4317 MGMT_STATUS_NOT_SUPPORTED
);
4319 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4320 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4321 MGMT_STATUS_INVALID_PARAMS
);
4325 if (!hdev_is_powered(hdev
)) {
4329 changed
= !test_and_set_bit(HCI_SC_ENABLED
,
4331 if (cp
->val
== 0x02)
4332 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4334 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4336 changed
= test_and_clear_bit(HCI_SC_ENABLED
,
4338 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4341 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4346 err
= new_settings(hdev
, sk
);
4351 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4352 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4359 if (val
== test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
4360 (cp
->val
== 0x02) == test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
)) {
4361 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4365 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4371 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4373 mgmt_pending_remove(cmd
);
4377 if (cp
->val
== 0x02)
4378 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4380 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4383 hci_dev_unlock(hdev
);
4387 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4388 void *data
, u16 len
)
4390 struct mgmt_mode
*cp
= data
;
4391 bool changed
, use_changed
;
4394 BT_DBG("request for %s", hdev
->name
);
4396 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4397 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
4398 MGMT_STATUS_INVALID_PARAMS
);
4403 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
4406 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
4409 if (cp
->val
== 0x02)
4410 use_changed
= !test_and_set_bit(HCI_USE_DEBUG_KEYS
,
4413 use_changed
= test_and_clear_bit(HCI_USE_DEBUG_KEYS
,
4416 if (hdev_is_powered(hdev
) && use_changed
&&
4417 test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
4418 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
4419 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
4420 sizeof(mode
), &mode
);
4423 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
4428 err
= new_settings(hdev
, sk
);
4431 hci_dev_unlock(hdev
);
4435 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4438 struct mgmt_cp_set_privacy
*cp
= cp_data
;
4442 BT_DBG("request for %s", hdev
->name
);
4444 if (!lmp_le_capable(hdev
))
4445 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4446 MGMT_STATUS_NOT_SUPPORTED
);
4448 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
4449 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4450 MGMT_STATUS_INVALID_PARAMS
);
4452 if (hdev_is_powered(hdev
))
4453 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4454 MGMT_STATUS_REJECTED
);
4458 /* If user space supports this command it is also expected to
4459 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4461 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4464 changed
= !test_and_set_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4465 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
4466 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4468 changed
= test_and_clear_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4469 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
4470 clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4473 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
4478 err
= new_settings(hdev
, sk
);
4481 hci_dev_unlock(hdev
);
4485 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
4487 switch (irk
->addr
.type
) {
4488 case BDADDR_LE_PUBLIC
:
4491 case BDADDR_LE_RANDOM
:
4492 /* Two most significant bits shall be set */
4493 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4501 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4504 struct mgmt_cp_load_irks
*cp
= cp_data
;
4505 u16 irk_count
, expected_len
;
4508 BT_DBG("request for %s", hdev
->name
);
4510 if (!lmp_le_capable(hdev
))
4511 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4512 MGMT_STATUS_NOT_SUPPORTED
);
4514 irk_count
= __le16_to_cpu(cp
->irk_count
);
4516 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4517 if (expected_len
!= len
) {
4518 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4520 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4521 MGMT_STATUS_INVALID_PARAMS
);
4524 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4526 for (i
= 0; i
< irk_count
; i
++) {
4527 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4529 if (!irk_is_valid(key
))
4530 return cmd_status(sk
, hdev
->id
,
4532 MGMT_STATUS_INVALID_PARAMS
);
4537 hci_smp_irks_clear(hdev
);
4539 for (i
= 0; i
< irk_count
; i
++) {
4540 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4543 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
4544 addr_type
= ADDR_LE_DEV_PUBLIC
;
4546 addr_type
= ADDR_LE_DEV_RANDOM
;
4548 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
4552 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4554 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4556 hci_dev_unlock(hdev
);
4561 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
4563 if (key
->master
!= 0x00 && key
->master
!= 0x01)
4566 switch (key
->addr
.type
) {
4567 case BDADDR_LE_PUBLIC
:
4570 case BDADDR_LE_RANDOM
:
4571 /* Two most significant bits shall be set */
4572 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4580 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4581 void *cp_data
, u16 len
)
4583 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
4584 u16 key_count
, expected_len
;
4587 BT_DBG("request for %s", hdev
->name
);
4589 if (!lmp_le_capable(hdev
))
4590 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4591 MGMT_STATUS_NOT_SUPPORTED
);
4593 key_count
= __le16_to_cpu(cp
->key_count
);
4595 expected_len
= sizeof(*cp
) + key_count
*
4596 sizeof(struct mgmt_ltk_info
);
4597 if (expected_len
!= len
) {
4598 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4600 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4601 MGMT_STATUS_INVALID_PARAMS
);
4604 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
4606 for (i
= 0; i
< key_count
; i
++) {
4607 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4609 if (!ltk_is_valid(key
))
4610 return cmd_status(sk
, hdev
->id
,
4611 MGMT_OP_LOAD_LONG_TERM_KEYS
,
4612 MGMT_STATUS_INVALID_PARAMS
);
4617 hci_smp_ltks_clear(hdev
);
4619 for (i
= 0; i
< key_count
; i
++) {
4620 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4621 u8 type
, addr_type
, authenticated
;
4623 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
4624 addr_type
= ADDR_LE_DEV_PUBLIC
;
4626 addr_type
= ADDR_LE_DEV_RANDOM
;
4631 type
= SMP_LTK_SLAVE
;
4633 switch (key
->type
) {
4634 case MGMT_LTK_UNAUTHENTICATED
:
4635 authenticated
= 0x00;
4637 case MGMT_LTK_AUTHENTICATED
:
4638 authenticated
= 0x01;
4644 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
4645 authenticated
, key
->val
, key
->enc_size
, key
->ediv
,
4649 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
4652 hci_dev_unlock(hdev
);
4657 struct cmd_conn_lookup
{
4658 struct hci_conn
*conn
;
4659 bool valid_tx_power
;
4663 static void get_conn_info_complete(struct pending_cmd
*cmd
, void *data
)
4665 struct cmd_conn_lookup
*match
= data
;
4666 struct mgmt_cp_get_conn_info
*cp
;
4667 struct mgmt_rp_get_conn_info rp
;
4668 struct hci_conn
*conn
= cmd
->user_data
;
4670 if (conn
!= match
->conn
)
4673 cp
= (struct mgmt_cp_get_conn_info
*) cmd
->param
;
4675 memset(&rp
, 0, sizeof(rp
));
4676 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4677 rp
.addr
.type
= cp
->addr
.type
;
4679 if (!match
->mgmt_status
) {
4680 rp
.rssi
= conn
->rssi
;
4682 if (match
->valid_tx_power
) {
4683 rp
.tx_power
= conn
->tx_power
;
4684 rp
.max_tx_power
= conn
->max_tx_power
;
4686 rp
.tx_power
= HCI_TX_POWER_INVALID
;
4687 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
4691 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
4692 match
->mgmt_status
, &rp
, sizeof(rp
));
4694 hci_conn_drop(conn
);
4696 mgmt_pending_remove(cmd
);
4699 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 status
)
4701 struct hci_cp_read_rssi
*cp
;
4702 struct hci_conn
*conn
;
4703 struct cmd_conn_lookup match
;
4706 BT_DBG("status 0x%02x", status
);
4710 /* TX power data is valid in case request completed successfully,
4711 * otherwise we assume it's not valid. At the moment we assume that
4712 * either both or none of current and max values are valid to keep code
4715 match
.valid_tx_power
= !status
;
4717 /* Commands sent in request are either Read RSSI or Read Transmit Power
4718 * Level so we check which one was last sent to retrieve connection
4719 * handle. Both commands have handle as first parameter so it's safe to
4720 * cast data on the same command struct.
4722 * First command sent is always Read RSSI and we fail only if it fails.
4723 * In other case we simply override error to indicate success as we
4724 * already remembered if TX power value is actually valid.
4726 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
4728 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
4733 BT_ERR("invalid sent_cmd in response");
4737 handle
= __le16_to_cpu(cp
->handle
);
4738 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4740 BT_ERR("unknown handle (%d) in response", handle
);
4745 match
.mgmt_status
= mgmt_status(status
);
4747 /* Cache refresh is complete, now reply for mgmt request for given
4750 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO
, hdev
,
4751 get_conn_info_complete
, &match
);
4754 hci_dev_unlock(hdev
);
4757 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4760 struct mgmt_cp_get_conn_info
*cp
= data
;
4761 struct mgmt_rp_get_conn_info rp
;
4762 struct hci_conn
*conn
;
4763 unsigned long conn_info_age
;
4766 BT_DBG("%s", hdev
->name
);
4768 memset(&rp
, 0, sizeof(rp
));
4769 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4770 rp
.addr
.type
= cp
->addr
.type
;
4772 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4773 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4774 MGMT_STATUS_INVALID_PARAMS
,
4779 if (!hdev_is_powered(hdev
)) {
4780 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4781 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
4785 if (cp
->addr
.type
== BDADDR_BREDR
)
4786 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
4789 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
4791 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
4792 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4793 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
4797 /* To avoid client trying to guess when to poll again for information we
4798 * calculate conn info age as random value between min/max set in hdev.
4800 conn_info_age
= hdev
->conn_info_min_age
+
4801 prandom_u32_max(hdev
->conn_info_max_age
-
4802 hdev
->conn_info_min_age
);
4804 /* Query controller to refresh cached values if they are too old or were
4807 if (time_after(jiffies
, conn
->conn_info_timestamp
+
4808 msecs_to_jiffies(conn_info_age
)) ||
4809 !conn
->conn_info_timestamp
) {
4810 struct hci_request req
;
4811 struct hci_cp_read_tx_power req_txp_cp
;
4812 struct hci_cp_read_rssi req_rssi_cp
;
4813 struct pending_cmd
*cmd
;
4815 hci_req_init(&req
, hdev
);
4816 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
4817 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
4820 /* For LE links TX power does not change thus we don't need to
4821 * query for it once value is known.
4823 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
4824 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
4825 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
4826 req_txp_cp
.type
= 0x00;
4827 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
4828 sizeof(req_txp_cp
), &req_txp_cp
);
4831 /* Max TX power needs to be read only once per connection */
4832 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
4833 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
4834 req_txp_cp
.type
= 0x01;
4835 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
4836 sizeof(req_txp_cp
), &req_txp_cp
);
4839 err
= hci_req_run(&req
, conn_info_refresh_complete
);
4843 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
4850 hci_conn_hold(conn
);
4851 cmd
->user_data
= conn
;
4853 conn
->conn_info_timestamp
= jiffies
;
4855 /* Cache is valid, just reply with values cached in hci_conn */
4856 rp
.rssi
= conn
->rssi
;
4857 rp
.tx_power
= conn
->tx_power
;
4858 rp
.max_tx_power
= conn
->max_tx_power
;
4860 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4861 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
4865 hci_dev_unlock(hdev
);
4869 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
)
4871 struct mgmt_cp_get_clock_info
*cp
;
4872 struct mgmt_rp_get_clock_info rp
;
4873 struct hci_cp_read_clock
*hci_cp
;
4874 struct pending_cmd
*cmd
;
4875 struct hci_conn
*conn
;
4877 BT_DBG("%s status %u", hdev
->name
, status
);
4881 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
4885 if (hci_cp
->which
) {
4886 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
4887 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4892 cmd
= mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
4898 memset(&rp
, 0, sizeof(rp
));
4899 memcpy(&rp
.addr
, &cp
->addr
, sizeof(rp
.addr
));
4904 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
4907 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
4908 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
4912 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
4914 mgmt_pending_remove(cmd
);
4916 hci_conn_drop(conn
);
4919 hci_dev_unlock(hdev
);
4922 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4925 struct mgmt_cp_get_clock_info
*cp
= data
;
4926 struct mgmt_rp_get_clock_info rp
;
4927 struct hci_cp_read_clock hci_cp
;
4928 struct pending_cmd
*cmd
;
4929 struct hci_request req
;
4930 struct hci_conn
*conn
;
4933 BT_DBG("%s", hdev
->name
);
4935 memset(&rp
, 0, sizeof(rp
));
4936 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4937 rp
.addr
.type
= cp
->addr
.type
;
4939 if (cp
->addr
.type
!= BDADDR_BREDR
)
4940 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
4941 MGMT_STATUS_INVALID_PARAMS
,
4946 if (!hdev_is_powered(hdev
)) {
4947 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
4948 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
4952 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
4953 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
4955 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
4956 err
= cmd_complete(sk
, hdev
->id
,
4957 MGMT_OP_GET_CLOCK_INFO
,
4958 MGMT_STATUS_NOT_CONNECTED
,
4966 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
4972 hci_req_init(&req
, hdev
);
4974 memset(&hci_cp
, 0, sizeof(hci_cp
));
4975 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
4978 hci_conn_hold(conn
);
4979 cmd
->user_data
= conn
;
4981 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
4982 hci_cp
.which
= 0x01; /* Piconet clock */
4983 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
4986 err
= hci_req_run(&req
, get_clock_info_complete
);
4988 mgmt_pending_remove(cmd
);
4991 hci_dev_unlock(hdev
);
4995 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
4996 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
4998 struct mgmt_ev_device_added ev
;
5000 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5001 ev
.addr
.type
= type
;
5004 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5007 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
5008 void *data
, u16 len
)
5010 struct mgmt_cp_add_device
*cp
= data
;
5011 u8 auto_conn
, addr_type
;
5014 BT_DBG("%s", hdev
->name
);
5016 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5017 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
5018 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5019 MGMT_STATUS_INVALID_PARAMS
,
5020 &cp
->addr
, sizeof(cp
->addr
));
5022 if (cp
->action
!= 0x00 && cp
->action
!= 0x01)
5023 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5024 MGMT_STATUS_INVALID_PARAMS
,
5025 &cp
->addr
, sizeof(cp
->addr
));
5029 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5030 addr_type
= ADDR_LE_DEV_PUBLIC
;
5032 addr_type
= ADDR_LE_DEV_RANDOM
;
5035 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
5037 auto_conn
= HCI_AUTO_CONN_DISABLED
;
5039 /* If the connection parameters don't exist for this device,
5040 * they will be created and configured with defaults.
5042 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
5044 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5046 &cp
->addr
, sizeof(cp
->addr
));
5050 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
5052 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5053 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5056 hci_dev_unlock(hdev
);
5060 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
5061 bdaddr_t
*bdaddr
, u8 type
)
5063 struct mgmt_ev_device_removed ev
;
5065 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5066 ev
.addr
.type
= type
;
5068 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
5071 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
5072 void *data
, u16 len
)
5074 struct mgmt_cp_remove_device
*cp
= data
;
5077 BT_DBG("%s", hdev
->name
);
5081 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5084 if (!bdaddr_type_is_le(cp
->addr
.type
)) {
5085 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5086 MGMT_STATUS_INVALID_PARAMS
,
5087 &cp
->addr
, sizeof(cp
->addr
));
5091 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5092 addr_type
= ADDR_LE_DEV_PUBLIC
;
5094 addr_type
= ADDR_LE_DEV_RANDOM
;
5096 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
5098 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
5100 if (cp
->addr
.type
) {
5101 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5102 MGMT_STATUS_INVALID_PARAMS
,
5103 &cp
->addr
, sizeof(cp
->addr
));
5107 hci_conn_params_clear_all(hdev
);
5110 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5111 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5114 hci_dev_unlock(hdev
);
5118 static const struct mgmt_handler
{
5119 int (*func
) (struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5123 } mgmt_handlers
[] = {
5124 { NULL
}, /* 0x0000 (no command) */
5125 { read_version
, false, MGMT_READ_VERSION_SIZE
},
5126 { read_commands
, false, MGMT_READ_COMMANDS_SIZE
},
5127 { read_index_list
, false, MGMT_READ_INDEX_LIST_SIZE
},
5128 { read_controller_info
, false, MGMT_READ_INFO_SIZE
},
5129 { set_powered
, false, MGMT_SETTING_SIZE
},
5130 { set_discoverable
, false, MGMT_SET_DISCOVERABLE_SIZE
},
5131 { set_connectable
, false, MGMT_SETTING_SIZE
},
5132 { set_fast_connectable
, false, MGMT_SETTING_SIZE
},
5133 { set_pairable
, false, MGMT_SETTING_SIZE
},
5134 { set_link_security
, false, MGMT_SETTING_SIZE
},
5135 { set_ssp
, false, MGMT_SETTING_SIZE
},
5136 { set_hs
, false, MGMT_SETTING_SIZE
},
5137 { set_le
, false, MGMT_SETTING_SIZE
},
5138 { set_dev_class
, false, MGMT_SET_DEV_CLASS_SIZE
},
5139 { set_local_name
, false, MGMT_SET_LOCAL_NAME_SIZE
},
5140 { add_uuid
, false, MGMT_ADD_UUID_SIZE
},
5141 { remove_uuid
, false, MGMT_REMOVE_UUID_SIZE
},
5142 { load_link_keys
, true, MGMT_LOAD_LINK_KEYS_SIZE
},
5143 { load_long_term_keys
, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE
},
5144 { disconnect
, false, MGMT_DISCONNECT_SIZE
},
5145 { get_connections
, false, MGMT_GET_CONNECTIONS_SIZE
},
5146 { pin_code_reply
, false, MGMT_PIN_CODE_REPLY_SIZE
},
5147 { pin_code_neg_reply
, false, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
5148 { set_io_capability
, false, MGMT_SET_IO_CAPABILITY_SIZE
},
5149 { pair_device
, false, MGMT_PAIR_DEVICE_SIZE
},
5150 { cancel_pair_device
, false, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
5151 { unpair_device
, false, MGMT_UNPAIR_DEVICE_SIZE
},
5152 { user_confirm_reply
, false, MGMT_USER_CONFIRM_REPLY_SIZE
},
5153 { user_confirm_neg_reply
, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
5154 { user_passkey_reply
, false, MGMT_USER_PASSKEY_REPLY_SIZE
},
5155 { user_passkey_neg_reply
, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
5156 { read_local_oob_data
, false, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
5157 { add_remote_oob_data
, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE
},
5158 { remove_remote_oob_data
, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
5159 { start_discovery
, false, MGMT_START_DISCOVERY_SIZE
},
5160 { stop_discovery
, false, MGMT_STOP_DISCOVERY_SIZE
},
5161 { confirm_name
, false, MGMT_CONFIRM_NAME_SIZE
},
5162 { block_device
, false, MGMT_BLOCK_DEVICE_SIZE
},
5163 { unblock_device
, false, MGMT_UNBLOCK_DEVICE_SIZE
},
5164 { set_device_id
, false, MGMT_SET_DEVICE_ID_SIZE
},
5165 { set_advertising
, false, MGMT_SETTING_SIZE
},
5166 { set_bredr
, false, MGMT_SETTING_SIZE
},
5167 { set_static_address
, false, MGMT_SET_STATIC_ADDRESS_SIZE
},
5168 { set_scan_params
, false, MGMT_SET_SCAN_PARAMS_SIZE
},
5169 { set_secure_conn
, false, MGMT_SETTING_SIZE
},
5170 { set_debug_keys
, false, MGMT_SETTING_SIZE
},
5171 { set_privacy
, false, MGMT_SET_PRIVACY_SIZE
},
5172 { load_irks
, true, MGMT_LOAD_IRKS_SIZE
},
5173 { get_conn_info
, false, MGMT_GET_CONN_INFO_SIZE
},
5174 { get_clock_info
, false, MGMT_GET_CLOCK_INFO_SIZE
},
5175 { add_device
, false, MGMT_ADD_DEVICE_SIZE
},
5176 { remove_device
, false, MGMT_REMOVE_DEVICE_SIZE
},
5179 int mgmt_control(struct sock
*sk
, struct msghdr
*msg
, size_t msglen
)
5183 struct mgmt_hdr
*hdr
;
5184 u16 opcode
, index
, len
;
5185 struct hci_dev
*hdev
= NULL
;
5186 const struct mgmt_handler
*handler
;
5189 BT_DBG("got %zu bytes", msglen
);
5191 if (msglen
< sizeof(*hdr
))
5194 buf
= kmalloc(msglen
, GFP_KERNEL
);
5198 if (memcpy_fromiovec(buf
, msg
->msg_iov
, msglen
)) {
5204 opcode
= __le16_to_cpu(hdr
->opcode
);
5205 index
= __le16_to_cpu(hdr
->index
);
5206 len
= __le16_to_cpu(hdr
->len
);
5208 if (len
!= msglen
- sizeof(*hdr
)) {
5213 if (index
!= MGMT_INDEX_NONE
) {
5214 hdev
= hci_dev_get(index
);
5216 err
= cmd_status(sk
, index
, opcode
,
5217 MGMT_STATUS_INVALID_INDEX
);
5221 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
5222 test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) ||
5223 test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
)) {
5224 err
= cmd_status(sk
, index
, opcode
,
5225 MGMT_STATUS_INVALID_INDEX
);
5230 if (opcode
>= ARRAY_SIZE(mgmt_handlers
) ||
5231 mgmt_handlers
[opcode
].func
== NULL
) {
5232 BT_DBG("Unknown op %u", opcode
);
5233 err
= cmd_status(sk
, index
, opcode
,
5234 MGMT_STATUS_UNKNOWN_COMMAND
);
5238 if ((hdev
&& opcode
< MGMT_OP_READ_INFO
) ||
5239 (!hdev
&& opcode
>= MGMT_OP_READ_INFO
)) {
5240 err
= cmd_status(sk
, index
, opcode
,
5241 MGMT_STATUS_INVALID_INDEX
);
5245 handler
= &mgmt_handlers
[opcode
];
5247 if ((handler
->var_len
&& len
< handler
->data_len
) ||
5248 (!handler
->var_len
&& len
!= handler
->data_len
)) {
5249 err
= cmd_status(sk
, index
, opcode
,
5250 MGMT_STATUS_INVALID_PARAMS
);
5255 mgmt_init_hdev(sk
, hdev
);
5257 cp
= buf
+ sizeof(*hdr
);
5259 err
= handler
->func(sk
, hdev
, cp
, len
);
5273 void mgmt_index_added(struct hci_dev
*hdev
)
5275 if (hdev
->dev_type
!= HCI_BREDR
)
5278 mgmt_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
5281 void mgmt_index_removed(struct hci_dev
*hdev
)
5283 u8 status
= MGMT_STATUS_INVALID_INDEX
;
5285 if (hdev
->dev_type
!= HCI_BREDR
)
5288 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status
);
5290 mgmt_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
5293 /* This function requires the caller holds hdev->lock */
5294 static void restart_le_auto_conns(struct hci_dev
*hdev
)
5296 struct hci_conn_params
*p
;
5299 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
5300 if (p
->auto_connect
== HCI_AUTO_CONN_ALWAYS
) {
5301 hci_pend_le_conn_add(hdev
, &p
->addr
, p
->addr_type
);
5306 /* Calling hci_pend_le_conn_add will actually already trigger
5307 * background scanning when needed. So no need to trigger it
5308 * just another time.
5310 * This check is here to avoid an unneeded restart of the
5311 * passive scanning. Since this is during the controller
5312 * power up phase the duplicate filtering is not an issue.
5317 hci_update_background_scan(hdev
);
5320 static void powered_complete(struct hci_dev
*hdev
, u8 status
)
5322 struct cmd_lookup match
= { NULL
, hdev
};
5324 BT_DBG("status 0x%02x", status
);
5328 restart_le_auto_conns(hdev
);
5330 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
5332 new_settings(hdev
, match
.sk
);
5334 hci_dev_unlock(hdev
);
5340 static int powered_update_hci(struct hci_dev
*hdev
)
5342 struct hci_request req
;
5345 hci_req_init(&req
, hdev
);
5347 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
) &&
5348 !lmp_host_ssp_capable(hdev
)) {
5351 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, 1, &ssp
);
5354 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
5355 lmp_bredr_capable(hdev
)) {
5356 struct hci_cp_write_le_host_supported cp
;
5359 cp
.simul
= lmp_le_br_capable(hdev
);
5361 /* Check first if we already have the right
5362 * host state (host features set)
5364 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
5365 cp
.simul
!= lmp_host_le_br_capable(hdev
))
5366 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
5370 if (lmp_le_capable(hdev
)) {
5371 /* Make sure the controller has a good default for
5372 * advertising data. This also applies to the case
5373 * where BR/EDR was toggled during the AUTO_OFF phase.
5375 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
5376 update_adv_data(&req
);
5377 update_scan_rsp_data(&req
);
5380 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
5381 enable_advertising(&req
);
5384 link_sec
= test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
5385 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
5386 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
5387 sizeof(link_sec
), &link_sec
);
5389 if (lmp_bredr_capable(hdev
)) {
5390 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
5391 set_bredr_scan(&req
);
5397 return hci_req_run(&req
, powered_complete
);
5400 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
5402 struct cmd_lookup match
= { NULL
, hdev
};
5403 u8 status_not_powered
= MGMT_STATUS_NOT_POWERED
;
5404 u8 zero_cod
[] = { 0, 0, 0 };
5407 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
5411 if (powered_update_hci(hdev
) == 0)
5414 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
5419 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
5420 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status_not_powered
);
5422 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
5423 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
5424 zero_cod
, sizeof(zero_cod
), NULL
);
5427 err
= new_settings(hdev
, match
.sk
);
5435 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
5437 struct pending_cmd
*cmd
;
5440 cmd
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
5444 if (err
== -ERFKILL
)
5445 status
= MGMT_STATUS_RFKILLED
;
5447 status
= MGMT_STATUS_FAILED
;
5449 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
5451 mgmt_pending_remove(cmd
);
5454 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
5456 struct hci_request req
;
5460 /* When discoverable timeout triggers, then just make sure
5461 * the limited discoverable flag is cleared. Even in the case
5462 * of a timeout triggered from general discoverable, it is
5463 * safe to unconditionally clear the flag.
5465 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
5466 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5468 hci_req_init(&req
, hdev
);
5469 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
5470 u8 scan
= SCAN_PAGE
;
5471 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
5472 sizeof(scan
), &scan
);
5475 update_adv_data(&req
);
5476 hci_req_run(&req
, NULL
);
5478 hdev
->discov_timeout
= 0;
5480 new_settings(hdev
, NULL
);
5482 hci_dev_unlock(hdev
);
5485 void mgmt_discoverable(struct hci_dev
*hdev
, u8 discoverable
)
5489 /* Nothing needed here if there's a pending command since that
5490 * commands request completion callback takes care of everything
5493 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
))
5496 /* Powering off may clear the scan mode - don't let that interfere */
5497 if (!discoverable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5501 changed
= !test_and_set_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5503 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
5504 changed
= test_and_clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5508 struct hci_request req
;
5510 /* In case this change in discoverable was triggered by
5511 * a disabling of connectable there could be a need to
5512 * update the advertising flags.
5514 hci_req_init(&req
, hdev
);
5515 update_adv_data(&req
);
5516 hci_req_run(&req
, NULL
);
5518 new_settings(hdev
, NULL
);
5522 void mgmt_connectable(struct hci_dev
*hdev
, u8 connectable
)
5526 /* Nothing needed here if there's a pending command since that
5527 * commands request completion callback takes care of everything
5530 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
))
5533 /* Powering off may clear the scan mode - don't let that interfere */
5534 if (!connectable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5538 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
5540 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
5543 new_settings(hdev
, NULL
);
5546 void mgmt_advertising(struct hci_dev
*hdev
, u8 advertising
)
5548 /* Powering off may stop advertising - don't let that interfere */
5549 if (!advertising
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5553 set_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5555 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5558 void mgmt_write_scan_failed(struct hci_dev
*hdev
, u8 scan
, u8 status
)
5560 u8 mgmt_err
= mgmt_status(status
);
5562 if (scan
& SCAN_PAGE
)
5563 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE
, hdev
,
5564 cmd_status_rsp
, &mgmt_err
);
5566 if (scan
& SCAN_INQUIRY
)
5567 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE
, hdev
,
5568 cmd_status_rsp
, &mgmt_err
);
5571 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
5574 struct mgmt_ev_new_link_key ev
;
5576 memset(&ev
, 0, sizeof(ev
));
5578 ev
.store_hint
= persistent
;
5579 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
5580 ev
.key
.addr
.type
= BDADDR_BREDR
;
5581 ev
.key
.type
= key
->type
;
5582 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
5583 ev
.key
.pin_len
= key
->pin_len
;
5585 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
5588 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
5590 if (ltk
->authenticated
)
5591 return MGMT_LTK_AUTHENTICATED
;
5593 return MGMT_LTK_UNAUTHENTICATED
;
5596 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
5598 struct mgmt_ev_new_long_term_key ev
;
5600 memset(&ev
, 0, sizeof(ev
));
5602 /* Devices using resolvable or non-resolvable random addresses
5603 * without providing an indentity resolving key don't require
5604 * to store long term keys. Their addresses will change the
5607 * Only when a remote device provides an identity address
5608 * make sure the long term key is stored. If the remote
5609 * identity is known, the long term keys are internally
5610 * mapped to the identity address. So allow static random
5611 * and public addresses here.
5613 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
5614 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
5615 ev
.store_hint
= 0x00;
5617 ev
.store_hint
= persistent
;
5619 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
5620 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
5621 ev
.key
.type
= mgmt_ltk_type(key
);
5622 ev
.key
.enc_size
= key
->enc_size
;
5623 ev
.key
.ediv
= key
->ediv
;
5624 ev
.key
.rand
= key
->rand
;
5626 if (key
->type
== SMP_LTK
)
5629 memcpy(ev
.key
.val
, key
->val
, sizeof(key
->val
));
5631 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
5634 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
)
5636 struct mgmt_ev_new_irk ev
;
5638 memset(&ev
, 0, sizeof(ev
));
5640 /* For identity resolving keys from devices that are already
5641 * using a public address or static random address, do not
5642 * ask for storing this key. The identity resolving key really
5643 * is only mandatory for devices using resovlable random
5646 * Storing all identity resolving keys has the downside that
5647 * they will be also loaded on next boot of they system. More
5648 * identity resolving keys, means more time during scanning is
5649 * needed to actually resolve these addresses.
5651 if (bacmp(&irk
->rpa
, BDADDR_ANY
))
5652 ev
.store_hint
= 0x01;
5654 ev
.store_hint
= 0x00;
5656 bacpy(&ev
.rpa
, &irk
->rpa
);
5657 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
5658 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
5659 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
5661 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
5664 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
5667 struct mgmt_ev_new_csrk ev
;
5669 memset(&ev
, 0, sizeof(ev
));
5671 /* Devices using resolvable or non-resolvable random addresses
5672 * without providing an indentity resolving key don't require
5673 * to store signature resolving keys. Their addresses will change
5674 * the next time around.
5676 * Only when a remote device provides an identity address
5677 * make sure the signature resolving key is stored. So allow
5678 * static random and public addresses here.
5680 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
5681 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
5682 ev
.store_hint
= 0x00;
5684 ev
.store_hint
= persistent
;
5686 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
5687 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
5688 ev
.key
.master
= csrk
->master
;
5689 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
5691 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
5694 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5695 u8 bdaddr_type
, u16 min_interval
, u16 max_interval
,
5696 u16 latency
, u16 timeout
)
5698 struct mgmt_ev_new_conn_param ev
;
5700 memset(&ev
, 0, sizeof(ev
));
5701 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5702 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
5703 ev
.store_hint
= 0x00;
5704 ev
.min_interval
= cpu_to_le16(min_interval
);
5705 ev
.max_interval
= cpu_to_le16(max_interval
);
5706 ev
.latency
= cpu_to_le16(latency
);
5707 ev
.timeout
= cpu_to_le16(timeout
);
5709 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
5712 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
5715 eir
[eir_len
++] = sizeof(type
) + data_len
;
5716 eir
[eir_len
++] = type
;
5717 memcpy(&eir
[eir_len
], data
, data_len
);
5718 eir_len
+= data_len
;
5723 void mgmt_device_connected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5724 u8 addr_type
, u32 flags
, u8
*name
, u8 name_len
,
5728 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
5731 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
5732 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5734 ev
->flags
= __cpu_to_le32(flags
);
5737 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
5740 if (dev_class
&& memcmp(dev_class
, "\0\0\0", 3) != 0)
5741 eir_len
= eir_append_data(ev
->eir
, eir_len
,
5742 EIR_CLASS_OF_DEV
, dev_class
, 3);
5744 ev
->eir_len
= cpu_to_le16(eir_len
);
5746 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
5747 sizeof(*ev
) + eir_len
, NULL
);
5750 static void disconnect_rsp(struct pending_cmd
*cmd
, void *data
)
5752 struct mgmt_cp_disconnect
*cp
= cmd
->param
;
5753 struct sock
**sk
= data
;
5754 struct mgmt_rp_disconnect rp
;
5756 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5757 rp
.addr
.type
= cp
->addr
.type
;
5759 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
, 0, &rp
,
5765 mgmt_pending_remove(cmd
);
5768 static void unpair_device_rsp(struct pending_cmd
*cmd
, void *data
)
5770 struct hci_dev
*hdev
= data
;
5771 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
5772 struct mgmt_rp_unpair_device rp
;
5774 memset(&rp
, 0, sizeof(rp
));
5775 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5776 rp
.addr
.type
= cp
->addr
.type
;
5778 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
5780 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, 0, &rp
, sizeof(rp
));
5782 mgmt_pending_remove(cmd
);
5785 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5786 u8 link_type
, u8 addr_type
, u8 reason
,
5787 bool mgmt_connected
)
5789 struct mgmt_ev_device_disconnected ev
;
5790 struct pending_cmd
*power_off
;
5791 struct sock
*sk
= NULL
;
5793 power_off
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
5795 struct mgmt_mode
*cp
= power_off
->param
;
5797 /* The connection is still in hci_conn_hash so test for 1
5798 * instead of 0 to know if this is the last one.
5800 if (!cp
->val
&& hci_conn_count(hdev
) == 1) {
5801 cancel_delayed_work(&hdev
->power_off
);
5802 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
5806 if (!mgmt_connected
)
5809 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
5812 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
5814 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5815 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5818 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
5823 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
5827 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5828 u8 link_type
, u8 addr_type
, u8 status
)
5830 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
5831 struct mgmt_cp_disconnect
*cp
;
5832 struct mgmt_rp_disconnect rp
;
5833 struct pending_cmd
*cmd
;
5835 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
5838 cmd
= mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
);
5844 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
5847 if (cp
->addr
.type
!= bdaddr_type
)
5850 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5851 rp
.addr
.type
= bdaddr_type
;
5853 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
,
5854 mgmt_status(status
), &rp
, sizeof(rp
));
5856 mgmt_pending_remove(cmd
);
5859 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5860 u8 addr_type
, u8 status
)
5862 struct mgmt_ev_connect_failed ev
;
5863 struct pending_cmd
*power_off
;
5865 power_off
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
5867 struct mgmt_mode
*cp
= power_off
->param
;
5869 /* The connection is still in hci_conn_hash so test for 1
5870 * instead of 0 to know if this is the last one.
5872 if (!cp
->val
&& hci_conn_count(hdev
) == 1) {
5873 cancel_delayed_work(&hdev
->power_off
);
5874 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
5878 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5879 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5880 ev
.status
= mgmt_status(status
);
5882 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
5885 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
5887 struct mgmt_ev_pin_code_request ev
;
5889 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5890 ev
.addr
.type
= BDADDR_BREDR
;
5893 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
5896 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5899 struct pending_cmd
*cmd
;
5900 struct mgmt_rp_pin_code_reply rp
;
5902 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
5906 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5907 rp
.addr
.type
= BDADDR_BREDR
;
5909 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
5910 mgmt_status(status
), &rp
, sizeof(rp
));
5912 mgmt_pending_remove(cmd
);
5915 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5918 struct pending_cmd
*cmd
;
5919 struct mgmt_rp_pin_code_reply rp
;
5921 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
5925 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5926 rp
.addr
.type
= BDADDR_BREDR
;
5928 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_NEG_REPLY
,
5929 mgmt_status(status
), &rp
, sizeof(rp
));
5931 mgmt_pending_remove(cmd
);
5934 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5935 u8 link_type
, u8 addr_type
, u32 value
,
5938 struct mgmt_ev_user_confirm_request ev
;
5940 BT_DBG("%s", hdev
->name
);
5942 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5943 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5944 ev
.confirm_hint
= confirm_hint
;
5945 ev
.value
= cpu_to_le32(value
);
5947 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
5951 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5952 u8 link_type
, u8 addr_type
)
5954 struct mgmt_ev_user_passkey_request ev
;
5956 BT_DBG("%s", hdev
->name
);
5958 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5959 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5961 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
5965 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5966 u8 link_type
, u8 addr_type
, u8 status
,
5969 struct pending_cmd
*cmd
;
5970 struct mgmt_rp_user_confirm_reply rp
;
5973 cmd
= mgmt_pending_find(opcode
, hdev
);
5977 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5978 rp
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5979 err
= cmd_complete(cmd
->sk
, hdev
->id
, opcode
, mgmt_status(status
),
5982 mgmt_pending_remove(cmd
);
5987 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5988 u8 link_type
, u8 addr_type
, u8 status
)
5990 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5991 status
, MGMT_OP_USER_CONFIRM_REPLY
);
5994 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5995 u8 link_type
, u8 addr_type
, u8 status
)
5997 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5999 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
6002 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6003 u8 link_type
, u8 addr_type
, u8 status
)
6005 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6006 status
, MGMT_OP_USER_PASSKEY_REPLY
);
6009 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6010 u8 link_type
, u8 addr_type
, u8 status
)
6012 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6014 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
6017 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6018 u8 link_type
, u8 addr_type
, u32 passkey
,
6021 struct mgmt_ev_passkey_notify ev
;
6023 BT_DBG("%s", hdev
->name
);
6025 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6026 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6027 ev
.passkey
= __cpu_to_le32(passkey
);
6028 ev
.entered
= entered
;
6030 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
6033 void mgmt_auth_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6034 u8 addr_type
, u8 status
)
6036 struct mgmt_ev_auth_failed ev
;
6038 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6039 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6040 ev
.status
= mgmt_status(status
);
6042 mgmt_event(MGMT_EV_AUTH_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
6045 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
6047 struct cmd_lookup match
= { NULL
, hdev
};
6051 u8 mgmt_err
= mgmt_status(status
);
6052 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
6053 cmd_status_rsp
, &mgmt_err
);
6057 if (test_bit(HCI_AUTH
, &hdev
->flags
))
6058 changed
= !test_and_set_bit(HCI_LINK_SECURITY
,
6061 changed
= test_and_clear_bit(HCI_LINK_SECURITY
,
6064 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
6068 new_settings(hdev
, match
.sk
);
6074 static void clear_eir(struct hci_request
*req
)
6076 struct hci_dev
*hdev
= req
->hdev
;
6077 struct hci_cp_write_eir cp
;
6079 if (!lmp_ext_inq_capable(hdev
))
6082 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
6084 memset(&cp
, 0, sizeof(cp
));
6086 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
6089 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
6091 struct cmd_lookup match
= { NULL
, hdev
};
6092 struct hci_request req
;
6093 bool changed
= false;
6096 u8 mgmt_err
= mgmt_status(status
);
6098 if (enable
&& test_and_clear_bit(HCI_SSP_ENABLED
,
6099 &hdev
->dev_flags
)) {
6100 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
6101 new_settings(hdev
, NULL
);
6104 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
6110 changed
= !test_and_set_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
6112 changed
= test_and_clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
6114 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
6117 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
6120 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
6123 new_settings(hdev
, match
.sk
);
6128 hci_req_init(&req
, hdev
);
6130 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
6131 if (test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
6132 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
6133 sizeof(enable
), &enable
);
6139 hci_req_run(&req
, NULL
);
6142 void mgmt_sc_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
6144 struct cmd_lookup match
= { NULL
, hdev
};
6145 bool changed
= false;
6148 u8 mgmt_err
= mgmt_status(status
);
6151 if (test_and_clear_bit(HCI_SC_ENABLED
,
6153 new_settings(hdev
, NULL
);
6154 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
6157 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
6158 cmd_status_rsp
, &mgmt_err
);
6163 changed
= !test_and_set_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
6165 changed
= test_and_clear_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
6166 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
6169 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
6170 settings_rsp
, &match
);
6173 new_settings(hdev
, match
.sk
);
6179 static void sk_lookup(struct pending_cmd
*cmd
, void *data
)
6181 struct cmd_lookup
*match
= data
;
6183 if (match
->sk
== NULL
) {
6184 match
->sk
= cmd
->sk
;
6185 sock_hold(match
->sk
);
6189 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
6192 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
6194 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
6195 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
6196 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
6199 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
, 3,
6206 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
6208 struct mgmt_cp_set_local_name ev
;
6209 struct pending_cmd
*cmd
;
6214 memset(&ev
, 0, sizeof(ev
));
6215 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
6216 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
6218 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
6220 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
6222 /* If this is a HCI command related to powering on the
6223 * HCI dev don't send any mgmt signals.
6225 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
6229 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
6230 cmd
? cmd
->sk
: NULL
);
6233 void mgmt_read_local_oob_data_complete(struct hci_dev
*hdev
, u8
*hash192
,
6234 u8
*randomizer192
, u8
*hash256
,
6235 u8
*randomizer256
, u8 status
)
6237 struct pending_cmd
*cmd
;
6239 BT_DBG("%s status %u", hdev
->name
, status
);
6241 cmd
= mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
6246 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
6247 mgmt_status(status
));
6249 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
6250 hash256
&& randomizer256
) {
6251 struct mgmt_rp_read_local_oob_ext_data rp
;
6253 memcpy(rp
.hash192
, hash192
, sizeof(rp
.hash192
));
6254 memcpy(rp
.randomizer192
, randomizer192
,
6255 sizeof(rp
.randomizer192
));
6257 memcpy(rp
.hash256
, hash256
, sizeof(rp
.hash256
));
6258 memcpy(rp
.randomizer256
, randomizer256
,
6259 sizeof(rp
.randomizer256
));
6261 cmd_complete(cmd
->sk
, hdev
->id
,
6262 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
6265 struct mgmt_rp_read_local_oob_data rp
;
6267 memcpy(rp
.hash
, hash192
, sizeof(rp
.hash
));
6268 memcpy(rp
.randomizer
, randomizer192
,
6269 sizeof(rp
.randomizer
));
6271 cmd_complete(cmd
->sk
, hdev
->id
,
6272 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
6277 mgmt_pending_remove(cmd
);
6280 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6281 u8 addr_type
, u8
*dev_class
, s8 rssi
, u32 flags
,
6282 u8
*eir
, u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
6285 struct mgmt_ev_device_found
*ev
= (void *) buf
;
6286 struct smp_irk
*irk
;
6289 if (!hci_discovery_active(hdev
))
6292 /* Make sure that the buffer is big enough. The 5 extra bytes
6293 * are for the potential CoD field.
6295 if (sizeof(*ev
) + eir_len
+ scan_rsp_len
+ 5 > sizeof(buf
))
6298 memset(buf
, 0, sizeof(buf
));
6300 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
6302 bacpy(&ev
->addr
.bdaddr
, &irk
->bdaddr
);
6303 ev
->addr
.type
= link_to_bdaddr(link_type
, irk
->addr_type
);
6305 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
6306 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6310 ev
->flags
= cpu_to_le32(flags
);
6313 memcpy(ev
->eir
, eir
, eir_len
);
6315 if (dev_class
&& !eir_has_data_type(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
))
6316 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
6319 if (scan_rsp_len
> 0)
6320 memcpy(ev
->eir
+ eir_len
, scan_rsp
, scan_rsp_len
);
6322 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
6323 ev_size
= sizeof(*ev
) + eir_len
+ scan_rsp_len
;
6325 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
6328 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6329 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
6331 struct mgmt_ev_device_found
*ev
;
6332 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
6335 ev
= (struct mgmt_ev_device_found
*) buf
;
6337 memset(buf
, 0, sizeof(buf
));
6339 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
6340 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6343 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
6346 ev
->eir_len
= cpu_to_le16(eir_len
);
6348 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
6351 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
6353 struct mgmt_ev_discovering ev
;
6354 struct pending_cmd
*cmd
;
6356 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
6359 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
6361 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
6364 u8 type
= hdev
->discovery
.type
;
6366 cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, 0, &type
,
6368 mgmt_pending_remove(cmd
);
6371 memset(&ev
, 0, sizeof(ev
));
6372 ev
.type
= hdev
->discovery
.type
;
6373 ev
.discovering
= discovering
;
6375 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
6378 static void adv_enable_complete(struct hci_dev
*hdev
, u8 status
)
6380 BT_DBG("%s status %u", hdev
->name
, status
);
6382 /* Clear the advertising mgmt setting if we failed to re-enable it */
6384 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
6385 new_settings(hdev
, NULL
);
6389 void mgmt_reenable_advertising(struct hci_dev
*hdev
)
6391 struct hci_request req
;
6393 if (hci_conn_num(hdev
, LE_LINK
) > 0)
6396 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
6399 hci_req_init(&req
, hdev
);
6400 enable_advertising(&req
);
6402 /* If this fails we have no option but to let user space know
6403 * that we've disabled advertising.
6405 if (hci_req_run(&req
, adv_enable_complete
) < 0) {
6406 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
6407 new_settings(hdev
, NULL
);