/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */
#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
/* Interface version/revision reported by MGMT_OP_READ_VERSION. */
#define MGMT_VERSION	1
#define MGMT_REVISION	5
39 static const u16 mgmt_commands
[] = {
40 MGMT_OP_READ_INDEX_LIST
,
43 MGMT_OP_SET_DISCOVERABLE
,
44 MGMT_OP_SET_CONNECTABLE
,
45 MGMT_OP_SET_FAST_CONNECTABLE
,
47 MGMT_OP_SET_LINK_SECURITY
,
51 MGMT_OP_SET_DEV_CLASS
,
52 MGMT_OP_SET_LOCAL_NAME
,
55 MGMT_OP_LOAD_LINK_KEYS
,
56 MGMT_OP_LOAD_LONG_TERM_KEYS
,
58 MGMT_OP_GET_CONNECTIONS
,
59 MGMT_OP_PIN_CODE_REPLY
,
60 MGMT_OP_PIN_CODE_NEG_REPLY
,
61 MGMT_OP_SET_IO_CAPABILITY
,
63 MGMT_OP_CANCEL_PAIR_DEVICE
,
64 MGMT_OP_UNPAIR_DEVICE
,
65 MGMT_OP_USER_CONFIRM_REPLY
,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
67 MGMT_OP_USER_PASSKEY_REPLY
,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
69 MGMT_OP_READ_LOCAL_OOB_DATA
,
70 MGMT_OP_ADD_REMOTE_OOB_DATA
,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
72 MGMT_OP_START_DISCOVERY
,
73 MGMT_OP_STOP_DISCOVERY
,
76 MGMT_OP_UNBLOCK_DEVICE
,
77 MGMT_OP_SET_DEVICE_ID
,
78 MGMT_OP_SET_ADVERTISING
,
80 MGMT_OP_SET_STATIC_ADDRESS
,
81 MGMT_OP_SET_SCAN_PARAMS
,
82 MGMT_OP_SET_SECURE_CONN
,
83 MGMT_OP_SET_DEBUG_KEYS
,
88 static const u16 mgmt_events
[] = {
89 MGMT_EV_CONTROLLER_ERROR
,
91 MGMT_EV_INDEX_REMOVED
,
93 MGMT_EV_CLASS_OF_DEV_CHANGED
,
94 MGMT_EV_LOCAL_NAME_CHANGED
,
96 MGMT_EV_NEW_LONG_TERM_KEY
,
97 MGMT_EV_DEVICE_CONNECTED
,
98 MGMT_EV_DEVICE_DISCONNECTED
,
99 MGMT_EV_CONNECT_FAILED
,
100 MGMT_EV_PIN_CODE_REQUEST
,
101 MGMT_EV_USER_CONFIRM_REQUEST
,
102 MGMT_EV_USER_PASSKEY_REQUEST
,
104 MGMT_EV_DEVICE_FOUND
,
106 MGMT_EV_DEVICE_BLOCKED
,
107 MGMT_EV_DEVICE_UNBLOCKED
,
108 MGMT_EV_DEVICE_UNPAIRED
,
109 MGMT_EV_PASSKEY_NOTIFY
,
/* Two-second timeout, expressed in jiffies. */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A controller counts as "powered" for mgmt purposes only when HCI_UP
 * is set and the device is not in the transient auto-off state
 * (HCI_AUTO_OFF).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
119 struct list_head list
;
127 /* HCI to MGMT error code conversion table */
128 static u8 mgmt_status_table
[] = {
130 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
131 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
132 MGMT_STATUS_FAILED
, /* Hardware Failure */
133 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
134 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
135 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
136 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
137 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
138 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
139 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
140 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
141 MGMT_STATUS_BUSY
, /* Command Disallowed */
142 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
143 MGMT_STATUS_REJECTED
, /* Rejected Security */
144 MGMT_STATUS_REJECTED
, /* Rejected Personal */
145 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
146 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
147 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
148 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
149 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
150 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
151 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
152 MGMT_STATUS_BUSY
, /* Repeated Attempts */
153 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
154 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
155 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
156 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
157 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
158 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
159 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
160 MGMT_STATUS_FAILED
, /* Unspecified Error */
161 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
162 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
163 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
164 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
165 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
166 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
167 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
168 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
169 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
170 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
171 MGMT_STATUS_FAILED
, /* Transaction Collision */
172 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
173 MGMT_STATUS_REJECTED
, /* QoS Rejected */
174 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
175 MGMT_STATUS_REJECTED
, /* Insufficient Security */
176 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
177 MGMT_STATUS_BUSY
, /* Role Switch Pending */
178 MGMT_STATUS_FAILED
, /* Slot Violation */
179 MGMT_STATUS_FAILED
, /* Role Switch Failed */
180 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
181 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
182 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
183 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
184 MGMT_STATUS_BUSY
, /* Controller Busy */
185 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
186 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
187 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
188 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
189 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
192 static u8
mgmt_status(u8 hci_status
)
194 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
195 return mgmt_status_table
[hci_status
];
197 return MGMT_STATUS_FAILED
;
200 static int cmd_status(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
)
203 struct mgmt_hdr
*hdr
;
204 struct mgmt_ev_cmd_status
*ev
;
207 BT_DBG("sock %p, index %u, cmd %u, status %u", sk
, index
, cmd
, status
);
209 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
), GFP_KERNEL
);
213 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
215 hdr
->opcode
= __constant_cpu_to_le16(MGMT_EV_CMD_STATUS
);
216 hdr
->index
= cpu_to_le16(index
);
217 hdr
->len
= cpu_to_le16(sizeof(*ev
));
219 ev
= (void *) skb_put(skb
, sizeof(*ev
));
221 ev
->opcode
= cpu_to_le16(cmd
);
223 err
= sock_queue_rcv_skb(sk
, skb
);
230 static int cmd_complete(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
,
231 void *rp
, size_t rp_len
)
234 struct mgmt_hdr
*hdr
;
235 struct mgmt_ev_cmd_complete
*ev
;
238 BT_DBG("sock %p", sk
);
240 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
) + rp_len
, GFP_KERNEL
);
244 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
246 hdr
->opcode
= __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE
);
247 hdr
->index
= cpu_to_le16(index
);
248 hdr
->len
= cpu_to_le16(sizeof(*ev
) + rp_len
);
250 ev
= (void *) skb_put(skb
, sizeof(*ev
) + rp_len
);
251 ev
->opcode
= cpu_to_le16(cmd
);
255 memcpy(ev
->data
, rp
, rp_len
);
257 err
= sock_queue_rcv_skb(sk
, skb
);
264 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
267 struct mgmt_rp_read_version rp
;
269 BT_DBG("sock %p", sk
);
271 rp
.version
= MGMT_VERSION
;
272 rp
.revision
= __constant_cpu_to_le16(MGMT_REVISION
);
274 return cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0, &rp
,
278 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
281 struct mgmt_rp_read_commands
*rp
;
282 const u16 num_commands
= ARRAY_SIZE(mgmt_commands
);
283 const u16 num_events
= ARRAY_SIZE(mgmt_events
);
288 BT_DBG("sock %p", sk
);
290 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
292 rp
= kmalloc(rp_size
, GFP_KERNEL
);
296 rp
->num_commands
= __constant_cpu_to_le16(num_commands
);
297 rp
->num_events
= __constant_cpu_to_le16(num_events
);
299 for (i
= 0, opcode
= rp
->opcodes
; i
< num_commands
; i
++, opcode
++)
300 put_unaligned_le16(mgmt_commands
[i
], opcode
);
302 for (i
= 0; i
< num_events
; i
++, opcode
++)
303 put_unaligned_le16(mgmt_events
[i
], opcode
);
305 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0, rp
,
312 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
315 struct mgmt_rp_read_index_list
*rp
;
321 BT_DBG("sock %p", sk
);
323 read_lock(&hci_dev_list_lock
);
326 list_for_each_entry(d
, &hci_dev_list
, list
) {
327 if (d
->dev_type
== HCI_BREDR
)
331 rp_len
= sizeof(*rp
) + (2 * count
);
332 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
334 read_unlock(&hci_dev_list_lock
);
339 list_for_each_entry(d
, &hci_dev_list
, list
) {
340 if (test_bit(HCI_SETUP
, &d
->dev_flags
))
343 if (test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
346 if (d
->dev_type
== HCI_BREDR
) {
347 rp
->index
[count
++] = cpu_to_le16(d
->id
);
348 BT_DBG("Added hci%u", d
->id
);
352 rp
->num_controllers
= cpu_to_le16(count
);
353 rp_len
= sizeof(*rp
) + (2 * count
);
355 read_unlock(&hci_dev_list_lock
);
357 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
, 0, rp
,
365 static u32
get_supported_settings(struct hci_dev
*hdev
)
369 settings
|= MGMT_SETTING_POWERED
;
370 settings
|= MGMT_SETTING_PAIRABLE
;
371 settings
|= MGMT_SETTING_DEBUG_KEYS
;
373 if (lmp_bredr_capable(hdev
)) {
374 settings
|= MGMT_SETTING_CONNECTABLE
;
375 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
376 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
377 settings
|= MGMT_SETTING_DISCOVERABLE
;
378 settings
|= MGMT_SETTING_BREDR
;
379 settings
|= MGMT_SETTING_LINK_SECURITY
;
381 if (lmp_ssp_capable(hdev
)) {
382 settings
|= MGMT_SETTING_SSP
;
383 settings
|= MGMT_SETTING_HS
;
386 if (lmp_sc_capable(hdev
) ||
387 test_bit(HCI_FORCE_SC
, &hdev
->dev_flags
))
388 settings
|= MGMT_SETTING_SECURE_CONN
;
391 if (lmp_le_capable(hdev
)) {
392 settings
|= MGMT_SETTING_LE
;
393 settings
|= MGMT_SETTING_ADVERTISING
;
394 settings
|= MGMT_SETTING_PRIVACY
;
400 static u32
get_current_settings(struct hci_dev
*hdev
)
404 if (hdev_is_powered(hdev
))
405 settings
|= MGMT_SETTING_POWERED
;
407 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
408 settings
|= MGMT_SETTING_CONNECTABLE
;
410 if (test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
411 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
413 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
414 settings
|= MGMT_SETTING_DISCOVERABLE
;
416 if (test_bit(HCI_PAIRABLE
, &hdev
->dev_flags
))
417 settings
|= MGMT_SETTING_PAIRABLE
;
419 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
420 settings
|= MGMT_SETTING_BREDR
;
422 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
423 settings
|= MGMT_SETTING_LE
;
425 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
))
426 settings
|= MGMT_SETTING_LINK_SECURITY
;
428 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
429 settings
|= MGMT_SETTING_SSP
;
431 if (test_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
))
432 settings
|= MGMT_SETTING_HS
;
434 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
435 settings
|= MGMT_SETTING_ADVERTISING
;
437 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
438 settings
|= MGMT_SETTING_SECURE_CONN
;
440 if (test_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
))
441 settings
|= MGMT_SETTING_DEBUG_KEYS
;
443 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
))
444 settings
|= MGMT_SETTING_PRIVACY
;
/* Device ID Profile (PnP Information) service class UUID; filtered
 * out of the EIR 16-bit UUID list in create_uuid16_list().
 */
#define PNP_INFO_SVCLASS_ID		0x1200
451 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
453 u8
*ptr
= data
, *uuids_start
= NULL
;
454 struct bt_uuid
*uuid
;
459 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
462 if (uuid
->size
!= 16)
465 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
469 if (uuid16
== PNP_INFO_SVCLASS_ID
)
475 uuids_start
[1] = EIR_UUID16_ALL
;
479 /* Stop if not enough space to put next UUID */
480 if ((ptr
- data
) + sizeof(u16
) > len
) {
481 uuids_start
[1] = EIR_UUID16_SOME
;
485 *ptr
++ = (uuid16
& 0x00ff);
486 *ptr
++ = (uuid16
& 0xff00) >> 8;
487 uuids_start
[0] += sizeof(uuid16
);
493 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
495 u8
*ptr
= data
, *uuids_start
= NULL
;
496 struct bt_uuid
*uuid
;
501 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
502 if (uuid
->size
!= 32)
508 uuids_start
[1] = EIR_UUID32_ALL
;
512 /* Stop if not enough space to put next UUID */
513 if ((ptr
- data
) + sizeof(u32
) > len
) {
514 uuids_start
[1] = EIR_UUID32_SOME
;
518 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
520 uuids_start
[0] += sizeof(u32
);
526 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
528 u8
*ptr
= data
, *uuids_start
= NULL
;
529 struct bt_uuid
*uuid
;
534 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
535 if (uuid
->size
!= 128)
541 uuids_start
[1] = EIR_UUID128_ALL
;
545 /* Stop if not enough space to put next UUID */
546 if ((ptr
- data
) + 16 > len
) {
547 uuids_start
[1] = EIR_UUID128_SOME
;
551 memcpy(ptr
, uuid
->uuid
, 16);
553 uuids_start
[0] += 16;
559 static struct pending_cmd
*mgmt_pending_find(u16 opcode
, struct hci_dev
*hdev
)
561 struct pending_cmd
*cmd
;
563 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
564 if (cmd
->opcode
== opcode
)
571 static u8
create_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
576 name_len
= strlen(hdev
->dev_name
);
578 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
580 if (name_len
> max_len
) {
582 ptr
[1] = EIR_NAME_SHORT
;
584 ptr
[1] = EIR_NAME_COMPLETE
;
586 ptr
[0] = name_len
+ 1;
588 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
590 ad_len
+= (name_len
+ 2);
591 ptr
+= (name_len
+ 2);
597 static void update_scan_rsp_data(struct hci_request
*req
)
599 struct hci_dev
*hdev
= req
->hdev
;
600 struct hci_cp_le_set_scan_rsp_data cp
;
603 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
606 memset(&cp
, 0, sizeof(cp
));
608 len
= create_scan_rsp_data(hdev
, cp
.data
);
610 if (hdev
->scan_rsp_data_len
== len
&&
611 memcmp(cp
.data
, hdev
->scan_rsp_data
, len
) == 0)
614 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
615 hdev
->scan_rsp_data_len
= len
;
619 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
622 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
624 struct pending_cmd
*cmd
;
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
629 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
631 struct mgmt_mode
*cp
= cmd
->param
;
633 return LE_AD_GENERAL
;
634 else if (cp
->val
== 0x02)
635 return LE_AD_LIMITED
;
637 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
638 return LE_AD_LIMITED
;
639 else if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
640 return LE_AD_GENERAL
;
646 static u8
create_adv_data(struct hci_dev
*hdev
, u8
*ptr
)
648 u8 ad_len
= 0, flags
= 0;
650 flags
|= get_adv_discov_flags(hdev
);
652 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
653 flags
|= LE_AD_NO_BREDR
;
656 BT_DBG("adv flags 0x%02x", flags
);
666 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
668 ptr
[1] = EIR_TX_POWER
;
669 ptr
[2] = (u8
) hdev
->adv_tx_power
;
678 static void update_adv_data(struct hci_request
*req
)
680 struct hci_dev
*hdev
= req
->hdev
;
681 struct hci_cp_le_set_adv_data cp
;
684 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
687 memset(&cp
, 0, sizeof(cp
));
689 len
= create_adv_data(hdev
, cp
.data
);
691 if (hdev
->adv_data_len
== len
&&
692 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
695 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
696 hdev
->adv_data_len
= len
;
700 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
703 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
708 name_len
= strlen(hdev
->dev_name
);
714 ptr
[1] = EIR_NAME_SHORT
;
716 ptr
[1] = EIR_NAME_COMPLETE
;
718 /* EIR Data length */
719 ptr
[0] = name_len
+ 1;
721 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
723 ptr
+= (name_len
+ 2);
726 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
728 ptr
[1] = EIR_TX_POWER
;
729 ptr
[2] = (u8
) hdev
->inq_tx_power
;
734 if (hdev
->devid_source
> 0) {
736 ptr
[1] = EIR_DEVICE_ID
;
738 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
739 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
740 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
741 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
746 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
747 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
748 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
751 static void update_eir(struct hci_request
*req
)
753 struct hci_dev
*hdev
= req
->hdev
;
754 struct hci_cp_write_eir cp
;
756 if (!hdev_is_powered(hdev
))
759 if (!lmp_ext_inq_capable(hdev
))
762 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
765 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
768 memset(&cp
, 0, sizeof(cp
));
770 create_eir(hdev
, cp
.data
);
772 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
775 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
777 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
780 static u8
get_service_classes(struct hci_dev
*hdev
)
782 struct bt_uuid
*uuid
;
785 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
786 val
|= uuid
->svc_hint
;
791 static void update_class(struct hci_request
*req
)
793 struct hci_dev
*hdev
= req
->hdev
;
796 BT_DBG("%s", hdev
->name
);
798 if (!hdev_is_powered(hdev
))
801 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
804 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
807 cod
[0] = hdev
->minor_class
;
808 cod
[1] = hdev
->major_class
;
809 cod
[2] = get_service_classes(hdev
);
811 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
814 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
817 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
820 static u8
get_adv_type(struct hci_dev
*hdev
)
822 struct pending_cmd
*cmd
;
825 /* If there's a pending mgmt command the flag will not yet have
826 * it's final value, so check for this first.
828 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
830 struct mgmt_mode
*cp
= cmd
->param
;
831 connectable
= !!cp
->val
;
833 connectable
= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
836 return connectable
? LE_ADV_IND
: LE_ADV_NONCONN_IND
;
839 static void enable_advertising(struct hci_request
*req
)
841 struct hci_dev
*hdev
= req
->hdev
;
842 struct hci_cp_le_set_adv_param cp
;
843 u8 own_addr_type
, enable
= 0x01;
844 bool require_privacy
;
846 require_privacy
= !test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
848 if (hci_update_random_address(req
, require_privacy
, &own_addr_type
) < 0)
851 memset(&cp
, 0, sizeof(cp
));
852 cp
.min_interval
= __constant_cpu_to_le16(0x0800);
853 cp
.max_interval
= __constant_cpu_to_le16(0x0800);
854 cp
.type
= get_adv_type(hdev
);
855 cp
.own_address_type
= own_addr_type
;
856 cp
.channel_map
= hdev
->le_adv_channel_map
;
858 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
860 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
863 static void disable_advertising(struct hci_request
*req
)
867 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
870 static void service_cache_off(struct work_struct
*work
)
872 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
874 struct hci_request req
;
876 if (!test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
879 hci_req_init(&req
, hdev
);
886 hci_dev_unlock(hdev
);
888 hci_req_run(&req
, NULL
);
891 static void rpa_expired(struct work_struct
*work
)
893 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
895 struct hci_request req
;
899 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
901 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) ||
902 hci_conn_num(hdev
, LE_LINK
) > 0)
905 /* The generation of a new RPA and programming it into the
906 * controller happens in the enable_advertising() function.
909 hci_req_init(&req
, hdev
);
911 disable_advertising(&req
);
912 enable_advertising(&req
);
914 hci_req_run(&req
, NULL
);
917 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
919 if (test_and_set_bit(HCI_MGMT
, &hdev
->dev_flags
))
922 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
923 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
925 /* Non-mgmt controlled devices get this bit set
926 * implicitly so that pairing works for them, however
927 * for mgmt we require user-space to explicitly enable
930 clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
933 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
934 void *data
, u16 data_len
)
936 struct mgmt_rp_read_info rp
;
938 BT_DBG("sock %p %s", sk
, hdev
->name
);
942 memset(&rp
, 0, sizeof(rp
));
944 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
946 rp
.version
= hdev
->hci_ver
;
947 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
949 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
950 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
952 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
954 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
955 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
957 hci_dev_unlock(hdev
);
959 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
963 static void mgmt_pending_free(struct pending_cmd
*cmd
)
970 static struct pending_cmd
*mgmt_pending_add(struct sock
*sk
, u16 opcode
,
971 struct hci_dev
*hdev
, void *data
,
974 struct pending_cmd
*cmd
;
976 cmd
= kmalloc(sizeof(*cmd
), GFP_KERNEL
);
980 cmd
->opcode
= opcode
;
981 cmd
->index
= hdev
->id
;
983 cmd
->param
= kmalloc(len
, GFP_KERNEL
);
990 memcpy(cmd
->param
, data
, len
);
995 list_add(&cmd
->list
, &hdev
->mgmt_pending
);
1000 static void mgmt_pending_foreach(u16 opcode
, struct hci_dev
*hdev
,
1001 void (*cb
)(struct pending_cmd
*cmd
,
1005 struct pending_cmd
*cmd
, *tmp
;
1007 list_for_each_entry_safe(cmd
, tmp
, &hdev
->mgmt_pending
, list
) {
1008 if (opcode
> 0 && cmd
->opcode
!= opcode
)
1015 static void mgmt_pending_remove(struct pending_cmd
*cmd
)
1017 list_del(&cmd
->list
);
1018 mgmt_pending_free(cmd
);
1021 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1023 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1025 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1029 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1032 struct mgmt_mode
*cp
= data
;
1033 struct pending_cmd
*cmd
;
1036 BT_DBG("request for %s", hdev
->name
);
1038 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1039 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1040 MGMT_STATUS_INVALID_PARAMS
);
1044 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1045 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1050 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1051 cancel_delayed_work(&hdev
->power_off
);
1054 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1056 err
= mgmt_powered(hdev
, 1);
1061 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1062 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1066 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1073 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1075 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1080 hci_dev_unlock(hdev
);
1084 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 data_len
,
1085 struct sock
*skip_sk
)
1087 struct sk_buff
*skb
;
1088 struct mgmt_hdr
*hdr
;
1090 skb
= alloc_skb(sizeof(*hdr
) + data_len
, GFP_KERNEL
);
1094 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
1095 hdr
->opcode
= cpu_to_le16(event
);
1097 hdr
->index
= cpu_to_le16(hdev
->id
);
1099 hdr
->index
= __constant_cpu_to_le16(MGMT_INDEX_NONE
);
1100 hdr
->len
= cpu_to_le16(data_len
);
1103 memcpy(skb_put(skb
, data_len
), data
, data_len
);
1106 __net_timestamp(skb
);
1108 hci_send_to_control(skb
, skip_sk
);
1114 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1118 ev
= cpu_to_le32(get_current_settings(hdev
));
1120 return mgmt_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
, sizeof(ev
), skip
);
1125 struct hci_dev
*hdev
;
1129 static void settings_rsp(struct pending_cmd
*cmd
, void *data
)
1131 struct cmd_lookup
*match
= data
;
1133 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1135 list_del(&cmd
->list
);
1137 if (match
->sk
== NULL
) {
1138 match
->sk
= cmd
->sk
;
1139 sock_hold(match
->sk
);
1142 mgmt_pending_free(cmd
);
1145 static void cmd_status_rsp(struct pending_cmd
*cmd
, void *data
)
1149 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1150 mgmt_pending_remove(cmd
);
1153 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1155 if (!lmp_bredr_capable(hdev
))
1156 return MGMT_STATUS_NOT_SUPPORTED
;
1157 else if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1158 return MGMT_STATUS_REJECTED
;
1160 return MGMT_STATUS_SUCCESS
;
1163 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1165 if (!lmp_le_capable(hdev
))
1166 return MGMT_STATUS_NOT_SUPPORTED
;
1167 else if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
1168 return MGMT_STATUS_REJECTED
;
1170 return MGMT_STATUS_SUCCESS
;
1173 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1175 struct pending_cmd
*cmd
;
1176 struct mgmt_mode
*cp
;
1177 struct hci_request req
;
1180 BT_DBG("status 0x%02x", status
);
1184 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1189 u8 mgmt_err
= mgmt_status(status
);
1190 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1191 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1197 changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
1200 if (hdev
->discov_timeout
> 0) {
1201 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1202 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1206 changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1210 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1213 new_settings(hdev
, cmd
->sk
);
1215 /* When the discoverable mode gets changed, make sure
1216 * that class of device has the limited discoverable
1217 * bit correctly set.
1219 hci_req_init(&req
, hdev
);
1221 hci_req_run(&req
, NULL
);
1224 mgmt_pending_remove(cmd
);
1227 hci_dev_unlock(hdev
);
1230 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1233 struct mgmt_cp_set_discoverable
*cp
= data
;
1234 struct pending_cmd
*cmd
;
1235 struct hci_request req
;
1240 BT_DBG("request for %s", hdev
->name
);
1242 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1243 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1244 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1245 MGMT_STATUS_REJECTED
);
1247 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1248 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1249 MGMT_STATUS_INVALID_PARAMS
);
1251 timeout
= __le16_to_cpu(cp
->timeout
);
1253 /* Disabling discoverable requires that no timeout is set,
1254 * and enabling limited discoverable requires a timeout.
1256 if ((cp
->val
== 0x00 && timeout
> 0) ||
1257 (cp
->val
== 0x02 && timeout
== 0))
1258 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1259 MGMT_STATUS_INVALID_PARAMS
);
1263 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1264 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1265 MGMT_STATUS_NOT_POWERED
);
1269 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1270 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1271 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1276 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
)) {
1277 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1278 MGMT_STATUS_REJECTED
);
1282 if (!hdev_is_powered(hdev
)) {
1283 bool changed
= false;
1285 /* Setting limited discoverable when powered off is
1286 * not a valid operation since it requires a timeout
1287 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1289 if (!!cp
->val
!= test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
)) {
1290 change_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1294 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1299 err
= new_settings(hdev
, sk
);
1304 /* If the current mode is the same, then just update the timeout
1305 * value with the new value. And if only the timeout gets updated,
1306 * then no need for any HCI transactions.
1308 if (!!cp
->val
== test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
) &&
1309 (cp
->val
== 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE
,
1310 &hdev
->dev_flags
)) {
1311 cancel_delayed_work(&hdev
->discov_off
);
1312 hdev
->discov_timeout
= timeout
;
1314 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1315 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1316 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1320 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1324 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1330 /* Cancel any potential discoverable timeout that might be
1331 * still active and store new timeout value. The arming of
1332 * the timeout happens in the complete handler.
1334 cancel_delayed_work(&hdev
->discov_off
);
1335 hdev
->discov_timeout
= timeout
;
1337 /* Limited discoverable mode */
1338 if (cp
->val
== 0x02)
1339 set_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1341 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1343 hci_req_init(&req
, hdev
);
1345 /* The procedure for LE-only controllers is much simpler - just
1346 * update the advertising data.
1348 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1354 struct hci_cp_write_current_iac_lap hci_cp
;
1356 if (cp
->val
== 0x02) {
1357 /* Limited discoverable mode */
1358 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1359 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1360 hci_cp
.iac_lap
[1] = 0x8b;
1361 hci_cp
.iac_lap
[2] = 0x9e;
1362 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1363 hci_cp
.iac_lap
[4] = 0x8b;
1364 hci_cp
.iac_lap
[5] = 0x9e;
1366 /* General discoverable mode */
1368 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1369 hci_cp
.iac_lap
[1] = 0x8b;
1370 hci_cp
.iac_lap
[2] = 0x9e;
1373 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1374 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1376 scan
|= SCAN_INQUIRY
;
1378 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1381 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1384 update_adv_data(&req
);
1386 err
= hci_req_run(&req
, set_discoverable_complete
);
1388 mgmt_pending_remove(cmd
);
1391 hci_dev_unlock(hdev
);
1395 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1397 struct hci_dev
*hdev
= req
->hdev
;
1398 struct hci_cp_write_page_scan_activity acp
;
1401 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1404 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1408 type
= PAGE_SCAN_TYPE_INTERLACED
;
1410 /* 160 msec page scan interval */
1411 acp
.interval
= __constant_cpu_to_le16(0x0100);
1413 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1415 /* default 1.28 sec page scan */
1416 acp
.interval
= __constant_cpu_to_le16(0x0800);
1419 acp
.window
= __constant_cpu_to_le16(0x0012);
1421 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1422 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1423 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
1426 if (hdev
->page_scan_type
!= type
)
1427 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
1430 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1432 struct pending_cmd
*cmd
;
1433 struct mgmt_mode
*cp
;
1436 BT_DBG("status 0x%02x", status
);
1440 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1445 u8 mgmt_err
= mgmt_status(status
);
1446 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1452 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1454 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1456 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1459 new_settings(hdev
, cmd
->sk
);
1462 mgmt_pending_remove(cmd
);
1465 hci_dev_unlock(hdev
);
1468 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1469 struct sock
*sk
, u8 val
)
1471 bool changed
= false;
1474 if (!!val
!= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
1478 set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1480 clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1481 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1484 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1489 return new_settings(hdev
, sk
);
1494 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1497 struct mgmt_mode
*cp
= data
;
1498 struct pending_cmd
*cmd
;
1499 struct hci_request req
;
1503 BT_DBG("request for %s", hdev
->name
);
1505 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1506 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1507 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1508 MGMT_STATUS_REJECTED
);
1510 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1511 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1512 MGMT_STATUS_INVALID_PARAMS
);
1516 if (!hdev_is_powered(hdev
)) {
1517 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1521 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1522 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1523 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1528 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1534 hci_req_init(&req
, hdev
);
1536 /* If BR/EDR is not enabled and we disable advertising as a
1537 * by-product of disabling connectable, we need to update the
1538 * advertising flags.
1540 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1542 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1543 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1545 update_adv_data(&req
);
1546 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1552 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
1553 hdev
->discov_timeout
> 0)
1554 cancel_delayed_work(&hdev
->discov_off
);
1557 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1560 /* If we're going from non-connectable to connectable or
1561 * vice-versa when fast connectable is enabled ensure that fast
1562 * connectable gets disabled. write_fast_connectable won't do
1563 * anything if the page scan parameters are already what they
1566 if (cp
->val
|| test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
1567 write_fast_connectable(&req
, false);
1569 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) &&
1570 hci_conn_num(hdev
, LE_LINK
) == 0) {
1571 disable_advertising(&req
);
1572 enable_advertising(&req
);
1575 err
= hci_req_run(&req
, set_connectable_complete
);
1577 mgmt_pending_remove(cmd
);
1578 if (err
== -ENODATA
)
1579 err
= set_connectable_update_settings(hdev
, sk
,
1585 hci_dev_unlock(hdev
);
1589 static int set_pairable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1592 struct mgmt_mode
*cp
= data
;
1596 BT_DBG("request for %s", hdev
->name
);
1598 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1599 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PAIRABLE
,
1600 MGMT_STATUS_INVALID_PARAMS
);
1605 changed
= !test_and_set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1607 changed
= test_and_clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1609 err
= send_settings_rsp(sk
, MGMT_OP_SET_PAIRABLE
, hdev
);
1614 err
= new_settings(hdev
, sk
);
1617 hci_dev_unlock(hdev
);
1621 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1624 struct mgmt_mode
*cp
= data
;
1625 struct pending_cmd
*cmd
;
1629 BT_DBG("request for %s", hdev
->name
);
1631 status
= mgmt_bredr_support(hdev
);
1633 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1636 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1637 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1638 MGMT_STATUS_INVALID_PARAMS
);
1642 if (!hdev_is_powered(hdev
)) {
1643 bool changed
= false;
1645 if (!!cp
->val
!= test_bit(HCI_LINK_SECURITY
,
1646 &hdev
->dev_flags
)) {
1647 change_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
1651 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1656 err
= new_settings(hdev
, sk
);
1661 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1662 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1669 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1670 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1674 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1680 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1682 mgmt_pending_remove(cmd
);
1687 hci_dev_unlock(hdev
);
1691 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1693 struct mgmt_mode
*cp
= data
;
1694 struct pending_cmd
*cmd
;
1698 BT_DBG("request for %s", hdev
->name
);
1700 status
= mgmt_bredr_support(hdev
);
1702 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1704 if (!lmp_ssp_capable(hdev
))
1705 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1706 MGMT_STATUS_NOT_SUPPORTED
);
1708 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1709 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1710 MGMT_STATUS_INVALID_PARAMS
);
1714 if (!hdev_is_powered(hdev
)) {
1718 changed
= !test_and_set_bit(HCI_SSP_ENABLED
,
1721 changed
= test_and_clear_bit(HCI_SSP_ENABLED
,
1724 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
1727 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1730 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1735 err
= new_settings(hdev
, sk
);
1740 if (mgmt_pending_find(MGMT_OP_SET_SSP
, hdev
) ||
1741 mgmt_pending_find(MGMT_OP_SET_HS
, hdev
)) {
1742 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1747 if (!!cp
->val
== test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
1748 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1752 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
1758 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
1760 mgmt_pending_remove(cmd
);
1765 hci_dev_unlock(hdev
);
1769 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1771 struct mgmt_mode
*cp
= data
;
1776 BT_DBG("request for %s", hdev
->name
);
1778 status
= mgmt_bredr_support(hdev
);
1780 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
1782 if (!lmp_ssp_capable(hdev
))
1783 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1784 MGMT_STATUS_NOT_SUPPORTED
);
1786 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
1787 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1788 MGMT_STATUS_REJECTED
);
1790 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1791 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1792 MGMT_STATUS_INVALID_PARAMS
);
1797 changed
= !test_and_set_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1799 if (hdev_is_powered(hdev
)) {
1800 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1801 MGMT_STATUS_REJECTED
);
1805 changed
= test_and_clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1808 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
1813 err
= new_settings(hdev
, sk
);
1816 hci_dev_unlock(hdev
);
1820 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
)
1822 struct cmd_lookup match
= { NULL
, hdev
};
1825 u8 mgmt_err
= mgmt_status(status
);
1827 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
1832 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
1834 new_settings(hdev
, match
.sk
);
1839 /* Make sure the controller has a good default for
1840 * advertising data. Restrict the update to when LE
1841 * has actually been enabled. During power on, the
1842 * update in powered_update_hci will take care of it.
1844 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1845 struct hci_request req
;
1849 hci_req_init(&req
, hdev
);
1850 update_adv_data(&req
);
1851 update_scan_rsp_data(&req
);
1852 hci_req_run(&req
, NULL
);
1854 hci_dev_unlock(hdev
);
1858 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1860 struct mgmt_mode
*cp
= data
;
1861 struct hci_cp_write_le_host_supported hci_cp
;
1862 struct pending_cmd
*cmd
;
1863 struct hci_request req
;
1867 BT_DBG("request for %s", hdev
->name
);
1869 if (!lmp_le_capable(hdev
))
1870 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1871 MGMT_STATUS_NOT_SUPPORTED
);
1873 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1874 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1875 MGMT_STATUS_INVALID_PARAMS
);
1877 /* LE-only devices do not allow toggling LE on/off */
1878 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1879 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1880 MGMT_STATUS_REJECTED
);
1885 enabled
= lmp_host_le_capable(hdev
);
1887 if (!hdev_is_powered(hdev
) || val
== enabled
) {
1888 bool changed
= false;
1890 if (val
!= test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1891 change_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
1895 if (!val
&& test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
1896 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
1900 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1905 err
= new_settings(hdev
, sk
);
1910 if (mgmt_pending_find(MGMT_OP_SET_LE
, hdev
) ||
1911 mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
1912 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1917 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
1923 hci_req_init(&req
, hdev
);
1925 memset(&hci_cp
, 0, sizeof(hci_cp
));
1929 hci_cp
.simul
= lmp_le_br_capable(hdev
);
1931 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1932 disable_advertising(&req
);
1935 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
1938 err
= hci_req_run(&req
, le_enable_complete
);
1940 mgmt_pending_remove(cmd
);
1943 hci_dev_unlock(hdev
);
1947 /* This is a helper function to test for pending mgmt commands that can
1948 * cause CoD or EIR HCI commands. We can only allow one such pending
1949 * mgmt command at a time since otherwise we cannot easily track what
1950 * the current values are, will be, and based on that calculate if a new
1951 * HCI command needs to be sent and if yes with what value.
1953 static bool pending_eir_or_class(struct hci_dev
*hdev
)
1955 struct pending_cmd
*cmd
;
1957 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
1958 switch (cmd
->opcode
) {
1959 case MGMT_OP_ADD_UUID
:
1960 case MGMT_OP_REMOVE_UUID
:
1961 case MGMT_OP_SET_DEV_CLASS
:
1962 case MGMT_OP_SET_POWERED
:
1970 static const u8 bluetooth_base_uuid
[] = {
1971 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1972 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1975 static u8
get_uuid_size(const u8
*uuid
)
1979 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
1982 val
= get_unaligned_le32(&uuid
[12]);
1989 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
1991 struct pending_cmd
*cmd
;
1995 cmd
= mgmt_pending_find(mgmt_op
, hdev
);
1999 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
2000 hdev
->dev_class
, 3);
2002 mgmt_pending_remove(cmd
);
2005 hci_dev_unlock(hdev
);
2008 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2010 BT_DBG("status 0x%02x", status
);
2012 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2015 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2017 struct mgmt_cp_add_uuid
*cp
= data
;
2018 struct pending_cmd
*cmd
;
2019 struct hci_request req
;
2020 struct bt_uuid
*uuid
;
2023 BT_DBG("request for %s", hdev
->name
);
2027 if (pending_eir_or_class(hdev
)) {
2028 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2033 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2039 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2040 uuid
->svc_hint
= cp
->svc_hint
;
2041 uuid
->size
= get_uuid_size(cp
->uuid
);
2043 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2045 hci_req_init(&req
, hdev
);
2050 err
= hci_req_run(&req
, add_uuid_complete
);
2052 if (err
!= -ENODATA
)
2055 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2056 hdev
->dev_class
, 3);
2060 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2069 hci_dev_unlock(hdev
);
2073 static bool enable_service_cache(struct hci_dev
*hdev
)
2075 if (!hdev_is_powered(hdev
))
2078 if (!test_and_set_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2079 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2087 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2089 BT_DBG("status 0x%02x", status
);
2091 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2094 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2097 struct mgmt_cp_remove_uuid
*cp
= data
;
2098 struct pending_cmd
*cmd
;
2099 struct bt_uuid
*match
, *tmp
;
2100 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2101 struct hci_request req
;
2104 BT_DBG("request for %s", hdev
->name
);
2108 if (pending_eir_or_class(hdev
)) {
2109 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2114 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2115 hci_uuids_clear(hdev
);
2117 if (enable_service_cache(hdev
)) {
2118 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2119 0, hdev
->dev_class
, 3);
2128 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2129 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2132 list_del(&match
->list
);
2138 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2139 MGMT_STATUS_INVALID_PARAMS
);
2144 hci_req_init(&req
, hdev
);
2149 err
= hci_req_run(&req
, remove_uuid_complete
);
2151 if (err
!= -ENODATA
)
2154 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2155 hdev
->dev_class
, 3);
2159 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2168 hci_dev_unlock(hdev
);
2172 static void set_class_complete(struct hci_dev
*hdev
, u8 status
)
2174 BT_DBG("status 0x%02x", status
);
2176 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2179 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2182 struct mgmt_cp_set_dev_class
*cp
= data
;
2183 struct pending_cmd
*cmd
;
2184 struct hci_request req
;
2187 BT_DBG("request for %s", hdev
->name
);
2189 if (!lmp_bredr_capable(hdev
))
2190 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2191 MGMT_STATUS_NOT_SUPPORTED
);
2195 if (pending_eir_or_class(hdev
)) {
2196 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2201 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2202 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2203 MGMT_STATUS_INVALID_PARAMS
);
2207 hdev
->major_class
= cp
->major
;
2208 hdev
->minor_class
= cp
->minor
;
2210 if (!hdev_is_powered(hdev
)) {
2211 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2212 hdev
->dev_class
, 3);
2216 hci_req_init(&req
, hdev
);
2218 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2219 hci_dev_unlock(hdev
);
2220 cancel_delayed_work_sync(&hdev
->service_cache
);
2227 err
= hci_req_run(&req
, set_class_complete
);
2229 if (err
!= -ENODATA
)
2232 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2233 hdev
->dev_class
, 3);
2237 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2246 hci_dev_unlock(hdev
);
2250 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2253 struct mgmt_cp_load_link_keys
*cp
= data
;
2254 u16 key_count
, expected_len
;
2258 BT_DBG("request for %s", hdev
->name
);
2260 if (!lmp_bredr_capable(hdev
))
2261 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2262 MGMT_STATUS_NOT_SUPPORTED
);
2264 key_count
= __le16_to_cpu(cp
->key_count
);
2266 expected_len
= sizeof(*cp
) + key_count
*
2267 sizeof(struct mgmt_link_key_info
);
2268 if (expected_len
!= len
) {
2269 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2271 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2272 MGMT_STATUS_INVALID_PARAMS
);
2275 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2276 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2277 MGMT_STATUS_INVALID_PARAMS
);
2279 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2282 for (i
= 0; i
< key_count
; i
++) {
2283 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2285 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2286 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2287 MGMT_STATUS_INVALID_PARAMS
);
2292 hci_link_keys_clear(hdev
);
2295 changed
= !test_and_set_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
2297 changed
= test_and_clear_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
2300 new_settings(hdev
, NULL
);
2302 for (i
= 0; i
< key_count
; i
++) {
2303 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2305 hci_add_link_key(hdev
, NULL
, 0, &key
->addr
.bdaddr
, key
->val
,
2306 key
->type
, key
->pin_len
);
2309 cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2311 hci_dev_unlock(hdev
);
2316 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2317 u8 addr_type
, struct sock
*skip_sk
)
2319 struct mgmt_ev_device_unpaired ev
;
2321 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2322 ev
.addr
.type
= addr_type
;
2324 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2328 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2331 struct mgmt_cp_unpair_device
*cp
= data
;
2332 struct mgmt_rp_unpair_device rp
;
2333 struct hci_cp_disconnect dc
;
2334 struct pending_cmd
*cmd
;
2335 struct hci_conn
*conn
;
2338 memset(&rp
, 0, sizeof(rp
));
2339 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2340 rp
.addr
.type
= cp
->addr
.type
;
2342 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2343 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2344 MGMT_STATUS_INVALID_PARAMS
,
2347 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2348 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2349 MGMT_STATUS_INVALID_PARAMS
,
2354 if (!hdev_is_powered(hdev
)) {
2355 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2356 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2360 if (cp
->addr
.type
== BDADDR_BREDR
) {
2361 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2365 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2366 addr_type
= ADDR_LE_DEV_PUBLIC
;
2368 addr_type
= ADDR_LE_DEV_RANDOM
;
2370 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2372 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2376 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2377 MGMT_STATUS_NOT_PAIRED
, &rp
, sizeof(rp
));
2381 if (cp
->disconnect
) {
2382 if (cp
->addr
.type
== BDADDR_BREDR
)
2383 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2386 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
2393 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2395 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2399 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2406 dc
.handle
= cpu_to_le16(conn
->handle
);
2407 dc
.reason
= 0x13; /* Remote User Terminated Connection */
2408 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2410 mgmt_pending_remove(cmd
);
2413 hci_dev_unlock(hdev
);
2417 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2420 struct mgmt_cp_disconnect
*cp
= data
;
2421 struct mgmt_rp_disconnect rp
;
2422 struct hci_cp_disconnect dc
;
2423 struct pending_cmd
*cmd
;
2424 struct hci_conn
*conn
;
2429 memset(&rp
, 0, sizeof(rp
));
2430 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2431 rp
.addr
.type
= cp
->addr
.type
;
2433 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2434 return cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2435 MGMT_STATUS_INVALID_PARAMS
,
2440 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2441 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2442 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2446 if (mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2447 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2448 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2452 if (cp
->addr
.type
== BDADDR_BREDR
)
2453 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2456 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
2458 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2459 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2460 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
2464 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2470 dc
.handle
= cpu_to_le16(conn
->handle
);
2471 dc
.reason
= HCI_ERROR_REMOTE_USER_TERM
;
2473 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2475 mgmt_pending_remove(cmd
);
2478 hci_dev_unlock(hdev
);
2482 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2484 switch (link_type
) {
2486 switch (addr_type
) {
2487 case ADDR_LE_DEV_PUBLIC
:
2488 return BDADDR_LE_PUBLIC
;
2491 /* Fallback to LE Random address type */
2492 return BDADDR_LE_RANDOM
;
2496 /* Fallback to BR/EDR type */
2497 return BDADDR_BREDR
;
2501 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2504 struct mgmt_rp_get_connections
*rp
;
2514 if (!hdev_is_powered(hdev
)) {
2515 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2516 MGMT_STATUS_NOT_POWERED
);
2521 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2522 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2526 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2527 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2534 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2535 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2537 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2538 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2539 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2544 rp
->conn_count
= cpu_to_le16(i
);
2546 /* Recalculate length in case of filtered SCO connections, etc */
2547 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2549 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2555 hci_dev_unlock(hdev
);
2559 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2560 struct mgmt_cp_pin_code_neg_reply
*cp
)
2562 struct pending_cmd
*cmd
;
2565 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2570 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2571 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2573 mgmt_pending_remove(cmd
);
2578 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2581 struct hci_conn
*conn
;
2582 struct mgmt_cp_pin_code_reply
*cp
= data
;
2583 struct hci_cp_pin_code_reply reply
;
2584 struct pending_cmd
*cmd
;
2591 if (!hdev_is_powered(hdev
)) {
2592 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2593 MGMT_STATUS_NOT_POWERED
);
2597 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2599 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2600 MGMT_STATUS_NOT_CONNECTED
);
2604 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2605 struct mgmt_cp_pin_code_neg_reply ncp
;
2607 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2609 BT_ERR("PIN code is not 16 bytes long");
2611 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2613 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2614 MGMT_STATUS_INVALID_PARAMS
);
2619 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2625 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2626 reply
.pin_len
= cp
->pin_len
;
2627 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2629 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2631 mgmt_pending_remove(cmd
);
2634 hci_dev_unlock(hdev
);
2638 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2641 struct mgmt_cp_set_io_capability
*cp
= data
;
2647 hdev
->io_capability
= cp
->io_capability
;
2649 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
2650 hdev
->io_capability
);
2652 hci_dev_unlock(hdev
);
2654 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0, NULL
,
2658 static struct pending_cmd
*find_pairing(struct hci_conn
*conn
)
2660 struct hci_dev
*hdev
= conn
->hdev
;
2661 struct pending_cmd
*cmd
;
2663 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2664 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
2667 if (cmd
->user_data
!= conn
)
2676 static void pairing_complete(struct pending_cmd
*cmd
, u8 status
)
2678 struct mgmt_rp_pair_device rp
;
2679 struct hci_conn
*conn
= cmd
->user_data
;
2681 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2682 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
2684 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
, status
,
2687 /* So we don't get further callbacks for this connection */
2688 conn
->connect_cfm_cb
= NULL
;
2689 conn
->security_cfm_cb
= NULL
;
2690 conn
->disconn_cfm_cb
= NULL
;
2692 hci_conn_drop(conn
);
2694 mgmt_pending_remove(cmd
);
2697 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
2699 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
2700 struct pending_cmd
*cmd
;
2702 cmd
= find_pairing(conn
);
2704 pairing_complete(cmd
, status
);
2707 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2709 struct pending_cmd
*cmd
;
2711 BT_DBG("status %u", status
);
2713 cmd
= find_pairing(conn
);
2715 BT_DBG("Unable to find a pending command");
2717 pairing_complete(cmd
, mgmt_status(status
));
2720 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2722 struct pending_cmd
*cmd
;
2724 BT_DBG("status %u", status
);
2729 cmd
= find_pairing(conn
);
2731 BT_DBG("Unable to find a pending command");
2733 pairing_complete(cmd
, mgmt_status(status
));
2736 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2739 struct mgmt_cp_pair_device
*cp
= data
;
2740 struct mgmt_rp_pair_device rp
;
2741 struct pending_cmd
*cmd
;
2742 u8 sec_level
, auth_type
;
2743 struct hci_conn
*conn
;
2748 memset(&rp
, 0, sizeof(rp
));
2749 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2750 rp
.addr
.type
= cp
->addr
.type
;
2752 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2753 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2754 MGMT_STATUS_INVALID_PARAMS
,
2759 if (!hdev_is_powered(hdev
)) {
2760 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2761 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2765 sec_level
= BT_SECURITY_MEDIUM
;
2766 if (cp
->io_cap
== 0x03)
2767 auth_type
= HCI_AT_DEDICATED_BONDING
;
2769 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
2771 if (cp
->addr
.type
== BDADDR_BREDR
)
2772 conn
= hci_connect(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
,
2773 cp
->addr
.type
, sec_level
, auth_type
);
2775 conn
= hci_connect(hdev
, LE_LINK
, &cp
->addr
.bdaddr
,
2776 cp
->addr
.type
, sec_level
, auth_type
);
2781 if (PTR_ERR(conn
) == -EBUSY
)
2782 status
= MGMT_STATUS_BUSY
;
2784 status
= MGMT_STATUS_CONNECT_FAILED
;
2786 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2792 if (conn
->connect_cfm_cb
) {
2793 hci_conn_drop(conn
);
2794 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2795 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2799 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
2802 hci_conn_drop(conn
);
2806 /* For LE, just connecting isn't a proof that the pairing finished */
2807 if (cp
->addr
.type
== BDADDR_BREDR
) {
2808 conn
->connect_cfm_cb
= pairing_complete_cb
;
2809 conn
->security_cfm_cb
= pairing_complete_cb
;
2810 conn
->disconn_cfm_cb
= pairing_complete_cb
;
2812 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
2813 conn
->security_cfm_cb
= le_pairing_complete_cb
;
2814 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
2817 conn
->io_capability
= cp
->io_cap
;
2818 cmd
->user_data
= conn
;
2820 if (conn
->state
== BT_CONNECTED
&&
2821 hci_conn_security(conn
, sec_level
, auth_type
))
2822 pairing_complete(cmd
, 0);
2827 hci_dev_unlock(hdev
);
2831 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2834 struct mgmt_addr_info
*addr
= data
;
2835 struct pending_cmd
*cmd
;
2836 struct hci_conn
*conn
;
2843 if (!hdev_is_powered(hdev
)) {
2844 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2845 MGMT_STATUS_NOT_POWERED
);
2849 cmd
= mgmt_pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
2851 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2852 MGMT_STATUS_INVALID_PARAMS
);
2856 conn
= cmd
->user_data
;
2858 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
2859 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2860 MGMT_STATUS_INVALID_PARAMS
);
2864 pairing_complete(cmd
, MGMT_STATUS_CANCELLED
);
2866 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
2867 addr
, sizeof(*addr
));
2869 hci_dev_unlock(hdev
);
2873 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
2874 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
2875 u16 hci_op
, __le32 passkey
)
2877 struct pending_cmd
*cmd
;
2878 struct hci_conn
*conn
;
2883 if (!hdev_is_powered(hdev
)) {
2884 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2885 MGMT_STATUS_NOT_POWERED
, addr
,
2890 if (addr
->type
== BDADDR_BREDR
)
2891 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
2893 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
2896 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2897 MGMT_STATUS_NOT_CONNECTED
, addr
,
2902 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
2903 /* Continue with pairing via SMP */
2904 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
2907 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2908 MGMT_STATUS_SUCCESS
, addr
,
2911 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2912 MGMT_STATUS_FAILED
, addr
,
2918 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
2924 /* Continue with pairing via HCI */
2925 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
2926 struct hci_cp_user_passkey_reply cp
;
2928 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
2929 cp
.passkey
= passkey
;
2930 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
2932 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
2936 mgmt_pending_remove(cmd
);
2939 hci_dev_unlock(hdev
);
2943 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2944 void *data
, u16 len
)
2946 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
2950 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2951 MGMT_OP_PIN_CODE_NEG_REPLY
,
2952 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
2955 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2958 struct mgmt_cp_user_confirm_reply
*cp
= data
;
2962 if (len
!= sizeof(*cp
))
2963 return cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
2964 MGMT_STATUS_INVALID_PARAMS
);
2966 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2967 MGMT_OP_USER_CONFIRM_REPLY
,
2968 HCI_OP_USER_CONFIRM_REPLY
, 0);
2971 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2972 void *data
, u16 len
)
2974 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
2978 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2979 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
2980 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
2983 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2986 struct mgmt_cp_user_passkey_reply
*cp
= data
;
2990 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2991 MGMT_OP_USER_PASSKEY_REPLY
,
2992 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
2995 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2996 void *data
, u16 len
)
2998 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3002 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3003 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3004 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3007 static void update_name(struct hci_request
*req
)
3009 struct hci_dev
*hdev
= req
->hdev
;
3010 struct hci_cp_write_local_name cp
;
3012 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3014 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3017 static void set_name_complete(struct hci_dev
*hdev
, u8 status
)
3019 struct mgmt_cp_set_local_name
*cp
;
3020 struct pending_cmd
*cmd
;
3022 BT_DBG("status 0x%02x", status
);
3026 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3033 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3034 mgmt_status(status
));
3036 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3039 mgmt_pending_remove(cmd
);
3042 hci_dev_unlock(hdev
);
3045 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3048 struct mgmt_cp_set_local_name
*cp
= data
;
3049 struct pending_cmd
*cmd
;
3050 struct hci_request req
;
3057 /* If the old values are the same as the new ones just return a
3058 * direct command complete event.
3060 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3061 !memcmp(hdev
->short_name
, cp
->short_name
,
3062 sizeof(hdev
->short_name
))) {
3063 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3068 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3070 if (!hdev_is_powered(hdev
)) {
3071 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3073 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3078 err
= mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
, len
,
3084 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3090 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3092 hci_req_init(&req
, hdev
);
3094 if (lmp_bredr_capable(hdev
)) {
3099 /* The name is stored in the scan response data and so
3100 * no need to udpate the advertising data here.
3102 if (lmp_le_capable(hdev
))
3103 update_scan_rsp_data(&req
);
3105 err
= hci_req_run(&req
, set_name_complete
);
3107 mgmt_pending_remove(cmd
);
3110 hci_dev_unlock(hdev
);
3114 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3115 void *data
, u16 data_len
)
3117 struct pending_cmd
*cmd
;
3120 BT_DBG("%s", hdev
->name
);
3124 if (!hdev_is_powered(hdev
)) {
3125 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3126 MGMT_STATUS_NOT_POWERED
);
3130 if (!lmp_ssp_capable(hdev
)) {
3131 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3132 MGMT_STATUS_NOT_SUPPORTED
);
3136 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3137 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3142 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3148 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
3149 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
,
3152 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3155 mgmt_pending_remove(cmd
);
3158 hci_dev_unlock(hdev
);
3162 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3163 void *data
, u16 len
)
3167 BT_DBG("%s ", hdev
->name
);
3171 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3172 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3175 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3176 cp
->hash
, cp
->randomizer
);
3178 status
= MGMT_STATUS_FAILED
;
3180 status
= MGMT_STATUS_SUCCESS
;
3182 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3183 status
, &cp
->addr
, sizeof(cp
->addr
));
3184 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3185 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3188 err
= hci_add_remote_oob_ext_data(hdev
, &cp
->addr
.bdaddr
,
3194 status
= MGMT_STATUS_FAILED
;
3196 status
= MGMT_STATUS_SUCCESS
;
3198 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3199 status
, &cp
->addr
, sizeof(cp
->addr
));
3201 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3202 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3203 MGMT_STATUS_INVALID_PARAMS
);
3206 hci_dev_unlock(hdev
);
3210 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3211 void *data
, u16 len
)
3213 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3217 BT_DBG("%s", hdev
->name
);
3221 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
);
3223 status
= MGMT_STATUS_INVALID_PARAMS
;
3225 status
= MGMT_STATUS_SUCCESS
;
3227 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3228 status
, &cp
->addr
, sizeof(cp
->addr
));
3230 hci_dev_unlock(hdev
);
3234 static int mgmt_start_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3236 struct pending_cmd
*cmd
;
3240 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3242 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3246 type
= hdev
->discovery
.type
;
3248 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3249 &type
, sizeof(type
));
3250 mgmt_pending_remove(cmd
);
3255 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3257 BT_DBG("status %d", status
);
3261 mgmt_start_discovery_failed(hdev
, status
);
3262 hci_dev_unlock(hdev
);
3267 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
3268 hci_dev_unlock(hdev
);
3270 switch (hdev
->discovery
.type
) {
3271 case DISCOV_TYPE_LE
:
3272 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
,
3276 case DISCOV_TYPE_INTERLEAVED
:
3277 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
,
3278 DISCOV_INTERLEAVED_TIMEOUT
);
3281 case DISCOV_TYPE_BREDR
:
3285 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
3289 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3290 void *data
, u16 len
)
3292 struct mgmt_cp_start_discovery
*cp
= data
;
3293 struct pending_cmd
*cmd
;
3294 struct hci_cp_le_set_scan_param param_cp
;
3295 struct hci_cp_le_set_scan_enable enable_cp
;
3296 struct hci_cp_inquiry inq_cp
;
3297 struct hci_request req
;
3298 /* General inquiry access code (GIAC) */
3299 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3300 u8 status
, own_addr_type
;
3303 BT_DBG("%s", hdev
->name
);
3307 if (!hdev_is_powered(hdev
)) {
3308 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3309 MGMT_STATUS_NOT_POWERED
);
3313 if (test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3314 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3319 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
) {
3320 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3325 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, NULL
, 0);
3331 hdev
->discovery
.type
= cp
->type
;
3333 hci_req_init(&req
, hdev
);
3335 switch (hdev
->discovery
.type
) {
3336 case DISCOV_TYPE_BREDR
:
3337 status
= mgmt_bredr_support(hdev
);
3339 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3341 mgmt_pending_remove(cmd
);
3345 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3346 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3348 mgmt_pending_remove(cmd
);
3352 hci_inquiry_cache_flush(hdev
);
3354 memset(&inq_cp
, 0, sizeof(inq_cp
));
3355 memcpy(&inq_cp
.lap
, lap
, sizeof(inq_cp
.lap
));
3356 inq_cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
3357 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(inq_cp
), &inq_cp
);
3360 case DISCOV_TYPE_LE
:
3361 case DISCOV_TYPE_INTERLEAVED
:
3362 status
= mgmt_le_support(hdev
);
3364 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3366 mgmt_pending_remove(cmd
);
3370 if (hdev
->discovery
.type
== DISCOV_TYPE_INTERLEAVED
&&
3371 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
3372 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3373 MGMT_STATUS_NOT_SUPPORTED
);
3374 mgmt_pending_remove(cmd
);
3378 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3379 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3380 MGMT_STATUS_REJECTED
);
3381 mgmt_pending_remove(cmd
);
3385 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
)) {
3386 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3388 mgmt_pending_remove(cmd
);
3392 memset(¶m_cp
, 0, sizeof(param_cp
));
3394 /* All active scans will be done with either a resolvable
3395 * private address (when privacy feature has been enabled)
3396 * or unresolvable private address.
3398 err
= hci_update_random_address(&req
, true, &own_addr_type
);
3400 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3401 MGMT_STATUS_FAILED
);
3402 mgmt_pending_remove(cmd
);
3406 param_cp
.type
= LE_SCAN_ACTIVE
;
3407 param_cp
.interval
= cpu_to_le16(DISCOV_LE_SCAN_INT
);
3408 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
3409 param_cp
.own_address_type
= own_addr_type
;
3410 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
3413 memset(&enable_cp
, 0, sizeof(enable_cp
));
3414 enable_cp
.enable
= LE_SCAN_ENABLE
;
3415 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3416 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
3421 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3422 MGMT_STATUS_INVALID_PARAMS
);
3423 mgmt_pending_remove(cmd
);
3427 err
= hci_req_run(&req
, start_discovery_complete
);
3429 mgmt_pending_remove(cmd
);
3431 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3434 hci_dev_unlock(hdev
);
3438 static int mgmt_stop_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3440 struct pending_cmd
*cmd
;
3443 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
3447 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3448 &hdev
->discovery
.type
, sizeof(hdev
->discovery
.type
));
3449 mgmt_pending_remove(cmd
);
3454 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3456 BT_DBG("status %d", status
);
3461 mgmt_stop_discovery_failed(hdev
, status
);
3465 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3468 hci_dev_unlock(hdev
);
3471 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3474 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
3475 struct pending_cmd
*cmd
;
3476 struct hci_cp_remote_name_req_cancel cp
;
3477 struct inquiry_entry
*e
;
3478 struct hci_request req
;
3479 struct hci_cp_le_set_scan_enable enable_cp
;
3482 BT_DBG("%s", hdev
->name
);
3486 if (!hci_discovery_active(hdev
)) {
3487 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3488 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
3489 sizeof(mgmt_cp
->type
));
3493 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
3494 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3495 MGMT_STATUS_INVALID_PARAMS
, &mgmt_cp
->type
,
3496 sizeof(mgmt_cp
->type
));
3500 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, NULL
, 0);
3506 hci_req_init(&req
, hdev
);
3508 switch (hdev
->discovery
.state
) {
3509 case DISCOVERY_FINDING
:
3510 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3511 hci_req_add(&req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
3513 cancel_delayed_work(&hdev
->le_scan_disable
);
3515 memset(&enable_cp
, 0, sizeof(enable_cp
));
3516 enable_cp
.enable
= LE_SCAN_DISABLE
;
3517 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
,
3518 sizeof(enable_cp
), &enable_cp
);
3523 case DISCOVERY_RESOLVING
:
3524 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
3527 mgmt_pending_remove(cmd
);
3528 err
= cmd_complete(sk
, hdev
->id
,
3529 MGMT_OP_STOP_DISCOVERY
, 0,
3531 sizeof(mgmt_cp
->type
));
3532 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3536 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
3537 hci_req_add(&req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
3543 BT_DBG("unknown discovery state %u", hdev
->discovery
.state
);
3545 mgmt_pending_remove(cmd
);
3546 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3547 MGMT_STATUS_FAILED
, &mgmt_cp
->type
,
3548 sizeof(mgmt_cp
->type
));
3552 err
= hci_req_run(&req
, stop_discovery_complete
);
3554 mgmt_pending_remove(cmd
);
3556 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
3559 hci_dev_unlock(hdev
);
3563 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3566 struct mgmt_cp_confirm_name
*cp
= data
;
3567 struct inquiry_entry
*e
;
3570 BT_DBG("%s", hdev
->name
);
3574 if (!hci_discovery_active(hdev
)) {
3575 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3576 MGMT_STATUS_FAILED
);
3580 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
3582 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3583 MGMT_STATUS_INVALID_PARAMS
);
3587 if (cp
->name_known
) {
3588 e
->name_state
= NAME_KNOWN
;
3591 e
->name_state
= NAME_NEEDED
;
3592 hci_inquiry_cache_update_resolve(hdev
, e
);
3595 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0, &cp
->addr
,
3599 hci_dev_unlock(hdev
);
3603 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3606 struct mgmt_cp_block_device
*cp
= data
;
3610 BT_DBG("%s", hdev
->name
);
3612 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3613 return cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
3614 MGMT_STATUS_INVALID_PARAMS
,
3615 &cp
->addr
, sizeof(cp
->addr
));
3619 err
= hci_blacklist_add(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3621 status
= MGMT_STATUS_FAILED
;
3623 status
= MGMT_STATUS_SUCCESS
;
3625 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
3626 &cp
->addr
, sizeof(cp
->addr
));
3628 hci_dev_unlock(hdev
);
3633 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3636 struct mgmt_cp_unblock_device
*cp
= data
;
3640 BT_DBG("%s", hdev
->name
);
3642 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3643 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
3644 MGMT_STATUS_INVALID_PARAMS
,
3645 &cp
->addr
, sizeof(cp
->addr
));
3649 err
= hci_blacklist_del(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3651 status
= MGMT_STATUS_INVALID_PARAMS
;
3653 status
= MGMT_STATUS_SUCCESS
;
3655 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
3656 &cp
->addr
, sizeof(cp
->addr
));
3658 hci_dev_unlock(hdev
);
3663 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3666 struct mgmt_cp_set_device_id
*cp
= data
;
3667 struct hci_request req
;
3671 BT_DBG("%s", hdev
->name
);
3673 source
= __le16_to_cpu(cp
->source
);
3675 if (source
> 0x0002)
3676 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
3677 MGMT_STATUS_INVALID_PARAMS
);
3681 hdev
->devid_source
= source
;
3682 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
3683 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
3684 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
3686 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0, NULL
, 0);
3688 hci_req_init(&req
, hdev
);
3690 hci_req_run(&req
, NULL
);
3692 hci_dev_unlock(hdev
);
3697 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
)
3699 struct cmd_lookup match
= { NULL
, hdev
};
3702 u8 mgmt_err
= mgmt_status(status
);
3704 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
3705 cmd_status_rsp
, &mgmt_err
);
3709 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
3712 new_settings(hdev
, match
.sk
);
3718 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3721 struct mgmt_mode
*cp
= data
;
3722 struct pending_cmd
*cmd
;
3723 struct hci_request req
;
3724 u8 val
, enabled
, status
;
3727 BT_DBG("request for %s", hdev
->name
);
3729 status
= mgmt_le_support(hdev
);
3731 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3734 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
3735 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3736 MGMT_STATUS_INVALID_PARAMS
);
3741 enabled
= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3743 /* The following conditions are ones which mean that we should
3744 * not do any HCI communication but directly send a mgmt
3745 * response to user space (after toggling the flag if
3748 if (!hdev_is_powered(hdev
) || val
== enabled
||
3749 hci_conn_num(hdev
, LE_LINK
) > 0) {
3750 bool changed
= false;
3752 if (val
!= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3753 change_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3757 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
3762 err
= new_settings(hdev
, sk
);
3767 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
3768 mgmt_pending_find(MGMT_OP_SET_LE
, hdev
)) {
3769 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3774 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
3780 hci_req_init(&req
, hdev
);
3783 enable_advertising(&req
);
3785 disable_advertising(&req
);
3787 err
= hci_req_run(&req
, set_advertising_complete
);
3789 mgmt_pending_remove(cmd
);
3792 hci_dev_unlock(hdev
);
3796 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
3797 void *data
, u16 len
)
3799 struct mgmt_cp_set_static_address
*cp
= data
;
3802 BT_DBG("%s", hdev
->name
);
3804 if (!lmp_le_capable(hdev
))
3805 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3806 MGMT_STATUS_NOT_SUPPORTED
);
3808 if (hdev_is_powered(hdev
))
3809 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3810 MGMT_STATUS_REJECTED
);
3812 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
3813 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
3814 return cmd_status(sk
, hdev
->id
,
3815 MGMT_OP_SET_STATIC_ADDRESS
,
3816 MGMT_STATUS_INVALID_PARAMS
);
3818 /* Two most significant bits shall be set */
3819 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
3820 return cmd_status(sk
, hdev
->id
,
3821 MGMT_OP_SET_STATIC_ADDRESS
,
3822 MGMT_STATUS_INVALID_PARAMS
);
3827 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
3829 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
, 0, NULL
, 0);
3831 hci_dev_unlock(hdev
);
3836 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
3837 void *data
, u16 len
)
3839 struct mgmt_cp_set_scan_params
*cp
= data
;
3840 __u16 interval
, window
;
3843 BT_DBG("%s", hdev
->name
);
3845 if (!lmp_le_capable(hdev
))
3846 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3847 MGMT_STATUS_NOT_SUPPORTED
);
3849 interval
= __le16_to_cpu(cp
->interval
);
3851 if (interval
< 0x0004 || interval
> 0x4000)
3852 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3853 MGMT_STATUS_INVALID_PARAMS
);
3855 window
= __le16_to_cpu(cp
->window
);
3857 if (window
< 0x0004 || window
> 0x4000)
3858 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3859 MGMT_STATUS_INVALID_PARAMS
);
3861 if (window
> interval
)
3862 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3863 MGMT_STATUS_INVALID_PARAMS
);
3867 hdev
->le_scan_interval
= interval
;
3868 hdev
->le_scan_window
= window
;
3870 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0, NULL
, 0);
3872 hci_dev_unlock(hdev
);
3877 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
)
3879 struct pending_cmd
*cmd
;
3881 BT_DBG("status 0x%02x", status
);
3885 cmd
= mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
3890 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3891 mgmt_status(status
));
3893 struct mgmt_mode
*cp
= cmd
->param
;
3896 set_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
3898 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
3900 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
3901 new_settings(hdev
, cmd
->sk
);
3904 mgmt_pending_remove(cmd
);
3907 hci_dev_unlock(hdev
);
3910 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
3911 void *data
, u16 len
)
3913 struct mgmt_mode
*cp
= data
;
3914 struct pending_cmd
*cmd
;
3915 struct hci_request req
;
3918 BT_DBG("%s", hdev
->name
);
3920 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
) ||
3921 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
3922 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3923 MGMT_STATUS_NOT_SUPPORTED
);
3925 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
3926 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3927 MGMT_STATUS_INVALID_PARAMS
);
3929 if (!hdev_is_powered(hdev
))
3930 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3931 MGMT_STATUS_NOT_POWERED
);
3933 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
3934 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3935 MGMT_STATUS_REJECTED
);
3939 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
3940 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3945 if (!!cp
->val
== test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
)) {
3946 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
3951 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
3958 hci_req_init(&req
, hdev
);
3960 write_fast_connectable(&req
, cp
->val
);
3962 err
= hci_req_run(&req
, fast_connectable_complete
);
3964 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3965 MGMT_STATUS_FAILED
);
3966 mgmt_pending_remove(cmd
);
3970 hci_dev_unlock(hdev
);
3975 static void set_bredr_scan(struct hci_request
*req
)
3977 struct hci_dev
*hdev
= req
->hdev
;
3980 /* Ensure that fast connectable is disabled. This function will
3981 * not do anything if the page scan parameters are already what
3984 write_fast_connectable(req
, false);
3986 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
3988 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
3989 scan
|= SCAN_INQUIRY
;
3992 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
3995 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
)
3997 struct pending_cmd
*cmd
;
3999 BT_DBG("status 0x%02x", status
);
4003 cmd
= mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
);
4008 u8 mgmt_err
= mgmt_status(status
);
4010 /* We need to restore the flag if related HCI commands
4013 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4015 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4017 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4018 new_settings(hdev
, cmd
->sk
);
4021 mgmt_pending_remove(cmd
);
4024 hci_dev_unlock(hdev
);
4027 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4029 struct mgmt_mode
*cp
= data
;
4030 struct pending_cmd
*cmd
;
4031 struct hci_request req
;
4034 BT_DBG("request for %s", hdev
->name
);
4036 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4037 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4038 MGMT_STATUS_NOT_SUPPORTED
);
4040 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
4041 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4042 MGMT_STATUS_REJECTED
);
4044 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4045 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4046 MGMT_STATUS_INVALID_PARAMS
);
4050 if (cp
->val
== test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4051 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4055 if (!hdev_is_powered(hdev
)) {
4057 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4058 clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
4059 clear_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4060 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4061 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
4064 change_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4066 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4070 err
= new_settings(hdev
, sk
);
4074 /* Reject disabling when powered on */
4076 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4077 MGMT_STATUS_REJECTED
);
4081 if (mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4082 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4087 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4093 /* We need to flip the bit already here so that update_adv_data
4094 * generates the correct flags.
4096 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4098 hci_req_init(&req
, hdev
);
4100 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4101 set_bredr_scan(&req
);
4103 /* Since only the advertising data flags will change, there
4104 * is no need to update the scan response data.
4106 update_adv_data(&req
);
4108 err
= hci_req_run(&req
, set_bredr_complete
);
4110 mgmt_pending_remove(cmd
);
4113 hci_dev_unlock(hdev
);
4117 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4118 void *data
, u16 len
)
4120 struct mgmt_mode
*cp
= data
;
4121 struct pending_cmd
*cmd
;
4125 BT_DBG("request for %s", hdev
->name
);
4127 status
= mgmt_bredr_support(hdev
);
4129 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4132 if (!lmp_sc_capable(hdev
) &&
4133 !test_bit(HCI_FORCE_SC
, &hdev
->dev_flags
))
4134 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4135 MGMT_STATUS_NOT_SUPPORTED
);
4137 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4138 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4139 MGMT_STATUS_INVALID_PARAMS
);
4143 if (!hdev_is_powered(hdev
)) {
4147 changed
= !test_and_set_bit(HCI_SC_ENABLED
,
4149 if (cp
->val
== 0x02)
4150 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4152 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4154 changed
= test_and_clear_bit(HCI_SC_ENABLED
,
4156 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4159 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4164 err
= new_settings(hdev
, sk
);
4169 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4170 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4177 if (val
== test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
4178 (cp
->val
== 0x02) == test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
)) {
4179 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4183 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4189 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4191 mgmt_pending_remove(cmd
);
4195 if (cp
->val
== 0x02)
4196 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4198 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4201 hci_dev_unlock(hdev
);
4205 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4206 void *data
, u16 len
)
4208 struct mgmt_mode
*cp
= data
;
4212 BT_DBG("request for %s", hdev
->name
);
4214 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4215 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
4216 MGMT_STATUS_INVALID_PARAMS
);
4221 changed
= !test_and_set_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
4223 changed
= test_and_clear_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
4225 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
4230 err
= new_settings(hdev
, sk
);
4233 hci_dev_unlock(hdev
);
4237 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4240 struct mgmt_cp_set_privacy
*cp
= cp_data
;
4244 BT_DBG("request for %s", hdev
->name
);
4246 if (!lmp_le_capable(hdev
))
4247 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4248 MGMT_STATUS_NOT_SUPPORTED
);
4250 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
4251 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4252 MGMT_STATUS_INVALID_PARAMS
);
4254 if (hdev_is_powered(hdev
))
4255 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4256 MGMT_STATUS_REJECTED
);
4260 /* If user space supports this command it is also expected to
4261 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4263 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4266 changed
= !test_and_set_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4267 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
4268 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4270 changed
= test_and_clear_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4271 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
4272 clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4275 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
4280 err
= new_settings(hdev
, sk
);
4283 hci_dev_unlock(hdev
);
4287 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
4289 switch (irk
->addr
.type
) {
4290 case BDADDR_LE_PUBLIC
:
4293 case BDADDR_LE_RANDOM
:
4294 /* Two most significant bits shall be set */
4295 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4303 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4306 struct mgmt_cp_load_irks
*cp
= cp_data
;
4307 u16 irk_count
, expected_len
;
4310 BT_DBG("request for %s", hdev
->name
);
4312 if (!lmp_le_capable(hdev
))
4313 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4314 MGMT_STATUS_NOT_SUPPORTED
);
4316 irk_count
= __le16_to_cpu(cp
->irk_count
);
4318 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4319 if (expected_len
!= len
) {
4320 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4322 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4323 MGMT_STATUS_INVALID_PARAMS
);
4326 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4328 for (i
= 0; i
< irk_count
; i
++) {
4329 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4331 if (!irk_is_valid(key
))
4332 return cmd_status(sk
, hdev
->id
,
4334 MGMT_STATUS_INVALID_PARAMS
);
4339 hci_smp_irks_clear(hdev
);
4341 for (i
= 0; i
< irk_count
; i
++) {
4342 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4345 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
4346 addr_type
= ADDR_LE_DEV_PUBLIC
;
4348 addr_type
= ADDR_LE_DEV_RANDOM
;
4350 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
4354 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4356 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4358 hci_dev_unlock(hdev
);
4363 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
4365 if (key
->master
!= 0x00 && key
->master
!= 0x01)
4368 switch (key
->addr
.type
) {
4369 case BDADDR_LE_PUBLIC
:
4372 case BDADDR_LE_RANDOM
:
4373 /* Two most significant bits shall be set */
4374 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4382 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4383 void *cp_data
, u16 len
)
4385 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
4386 u16 key_count
, expected_len
;
4389 BT_DBG("request for %s", hdev
->name
);
4391 if (!lmp_le_capable(hdev
))
4392 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4393 MGMT_STATUS_NOT_SUPPORTED
);
4395 key_count
= __le16_to_cpu(cp
->key_count
);
4397 expected_len
= sizeof(*cp
) + key_count
*
4398 sizeof(struct mgmt_ltk_info
);
4399 if (expected_len
!= len
) {
4400 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4402 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4403 MGMT_STATUS_INVALID_PARAMS
);
4406 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
4408 for (i
= 0; i
< key_count
; i
++) {
4409 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4411 if (!ltk_is_valid(key
))
4412 return cmd_status(sk
, hdev
->id
,
4413 MGMT_OP_LOAD_LONG_TERM_KEYS
,
4414 MGMT_STATUS_INVALID_PARAMS
);
4419 hci_smp_ltks_clear(hdev
);
4421 for (i
= 0; i
< key_count
; i
++) {
4422 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4425 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
4426 addr_type
= ADDR_LE_DEV_PUBLIC
;
4428 addr_type
= ADDR_LE_DEV_RANDOM
;
4433 type
= HCI_SMP_LTK_SLAVE
;
4435 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
4436 key
->type
, key
->val
, key
->enc_size
, key
->ediv
,
4440 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
4443 hci_dev_unlock(hdev
);
4448 static const struct mgmt_handler
{
4449 int (*func
) (struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4453 } mgmt_handlers
[] = {
4454 { NULL
}, /* 0x0000 (no command) */
4455 { read_version
, false, MGMT_READ_VERSION_SIZE
},
4456 { read_commands
, false, MGMT_READ_COMMANDS_SIZE
},
4457 { read_index_list
, false, MGMT_READ_INDEX_LIST_SIZE
},
4458 { read_controller_info
, false, MGMT_READ_INFO_SIZE
},
4459 { set_powered
, false, MGMT_SETTING_SIZE
},
4460 { set_discoverable
, false, MGMT_SET_DISCOVERABLE_SIZE
},
4461 { set_connectable
, false, MGMT_SETTING_SIZE
},
4462 { set_fast_connectable
, false, MGMT_SETTING_SIZE
},
4463 { set_pairable
, false, MGMT_SETTING_SIZE
},
4464 { set_link_security
, false, MGMT_SETTING_SIZE
},
4465 { set_ssp
, false, MGMT_SETTING_SIZE
},
4466 { set_hs
, false, MGMT_SETTING_SIZE
},
4467 { set_le
, false, MGMT_SETTING_SIZE
},
4468 { set_dev_class
, false, MGMT_SET_DEV_CLASS_SIZE
},
4469 { set_local_name
, false, MGMT_SET_LOCAL_NAME_SIZE
},
4470 { add_uuid
, false, MGMT_ADD_UUID_SIZE
},
4471 { remove_uuid
, false, MGMT_REMOVE_UUID_SIZE
},
4472 { load_link_keys
, true, MGMT_LOAD_LINK_KEYS_SIZE
},
4473 { load_long_term_keys
, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE
},
4474 { disconnect
, false, MGMT_DISCONNECT_SIZE
},
4475 { get_connections
, false, MGMT_GET_CONNECTIONS_SIZE
},
4476 { pin_code_reply
, false, MGMT_PIN_CODE_REPLY_SIZE
},
4477 { pin_code_neg_reply
, false, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
4478 { set_io_capability
, false, MGMT_SET_IO_CAPABILITY_SIZE
},
4479 { pair_device
, false, MGMT_PAIR_DEVICE_SIZE
},
4480 { cancel_pair_device
, false, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
4481 { unpair_device
, false, MGMT_UNPAIR_DEVICE_SIZE
},
4482 { user_confirm_reply
, false, MGMT_USER_CONFIRM_REPLY_SIZE
},
4483 { user_confirm_neg_reply
, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
4484 { user_passkey_reply
, false, MGMT_USER_PASSKEY_REPLY_SIZE
},
4485 { user_passkey_neg_reply
, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
4486 { read_local_oob_data
, false, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
4487 { add_remote_oob_data
, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE
},
4488 { remove_remote_oob_data
, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
4489 { start_discovery
, false, MGMT_START_DISCOVERY_SIZE
},
4490 { stop_discovery
, false, MGMT_STOP_DISCOVERY_SIZE
},
4491 { confirm_name
, false, MGMT_CONFIRM_NAME_SIZE
},
4492 { block_device
, false, MGMT_BLOCK_DEVICE_SIZE
},
4493 { unblock_device
, false, MGMT_UNBLOCK_DEVICE_SIZE
},
4494 { set_device_id
, false, MGMT_SET_DEVICE_ID_SIZE
},
4495 { set_advertising
, false, MGMT_SETTING_SIZE
},
4496 { set_bredr
, false, MGMT_SETTING_SIZE
},
4497 { set_static_address
, false, MGMT_SET_STATIC_ADDRESS_SIZE
},
4498 { set_scan_params
, false, MGMT_SET_SCAN_PARAMS_SIZE
},
4499 { set_secure_conn
, false, MGMT_SETTING_SIZE
},
4500 { set_debug_keys
, false, MGMT_SETTING_SIZE
},
4501 { set_privacy
, false, MGMT_SET_PRIVACY_SIZE
},
4502 { load_irks
, true, MGMT_LOAD_IRKS_SIZE
},
4506 int mgmt_control(struct sock
*sk
, struct msghdr
*msg
, size_t msglen
)
4510 struct mgmt_hdr
*hdr
;
4511 u16 opcode
, index
, len
;
4512 struct hci_dev
*hdev
= NULL
;
4513 const struct mgmt_handler
*handler
;
4516 BT_DBG("got %zu bytes", msglen
);
4518 if (msglen
< sizeof(*hdr
))
4521 buf
= kmalloc(msglen
, GFP_KERNEL
);
4525 if (memcpy_fromiovec(buf
, msg
->msg_iov
, msglen
)) {
4531 opcode
= __le16_to_cpu(hdr
->opcode
);
4532 index
= __le16_to_cpu(hdr
->index
);
4533 len
= __le16_to_cpu(hdr
->len
);
4535 if (len
!= msglen
- sizeof(*hdr
)) {
4540 if (index
!= MGMT_INDEX_NONE
) {
4541 hdev
= hci_dev_get(index
);
4543 err
= cmd_status(sk
, index
, opcode
,
4544 MGMT_STATUS_INVALID_INDEX
);
4548 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
4549 test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
4550 err
= cmd_status(sk
, index
, opcode
,
4551 MGMT_STATUS_INVALID_INDEX
);
4556 if (opcode
>= ARRAY_SIZE(mgmt_handlers
) ||
4557 mgmt_handlers
[opcode
].func
== NULL
) {
4558 BT_DBG("Unknown op %u", opcode
);
4559 err
= cmd_status(sk
, index
, opcode
,
4560 MGMT_STATUS_UNKNOWN_COMMAND
);
4564 if ((hdev
&& opcode
< MGMT_OP_READ_INFO
) ||
4565 (!hdev
&& opcode
>= MGMT_OP_READ_INFO
)) {
4566 err
= cmd_status(sk
, index
, opcode
,
4567 MGMT_STATUS_INVALID_INDEX
);
4571 handler
= &mgmt_handlers
[opcode
];
4573 if ((handler
->var_len
&& len
< handler
->data_len
) ||
4574 (!handler
->var_len
&& len
!= handler
->data_len
)) {
4575 err
= cmd_status(sk
, index
, opcode
,
4576 MGMT_STATUS_INVALID_PARAMS
);
4581 mgmt_init_hdev(sk
, hdev
);
4583 cp
= buf
+ sizeof(*hdr
);
4585 err
= handler
->func(sk
, hdev
, cp
, len
);
4599 void mgmt_index_added(struct hci_dev
*hdev
)
4601 if (hdev
->dev_type
!= HCI_BREDR
)
4604 mgmt_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
4607 void mgmt_index_removed(struct hci_dev
*hdev
)
4609 u8 status
= MGMT_STATUS_INVALID_INDEX
;
4611 if (hdev
->dev_type
!= HCI_BREDR
)
4614 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status
);
4616 mgmt_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
4619 static void powered_complete(struct hci_dev
*hdev
, u8 status
)
4621 struct cmd_lookup match
= { NULL
, hdev
};
4623 BT_DBG("status 0x%02x", status
);
4627 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
4629 new_settings(hdev
, match
.sk
);
4631 hci_dev_unlock(hdev
);
4637 static int powered_update_hci(struct hci_dev
*hdev
)
4639 struct hci_request req
;
4642 hci_req_init(&req
, hdev
);
4644 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
) &&
4645 !lmp_host_ssp_capable(hdev
)) {
4648 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, 1, &ssp
);
4651 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
4652 lmp_bredr_capable(hdev
)) {
4653 struct hci_cp_write_le_host_supported cp
;
4656 cp
.simul
= lmp_le_br_capable(hdev
);
4658 /* Check first if we already have the right
4659 * host state (host features set)
4661 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
4662 cp
.simul
!= lmp_host_le_br_capable(hdev
))
4663 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
4667 if (lmp_le_capable(hdev
)) {
4668 /* Make sure the controller has a good default for
4669 * advertising data. This also applies to the case
4670 * where BR/EDR was toggled during the AUTO_OFF phase.
4672 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
4673 update_adv_data(&req
);
4674 update_scan_rsp_data(&req
);
4677 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
4678 enable_advertising(&req
);
4681 link_sec
= test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4682 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
4683 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
4684 sizeof(link_sec
), &link_sec
);
4686 if (lmp_bredr_capable(hdev
)) {
4687 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
4688 set_bredr_scan(&req
);
4694 return hci_req_run(&req
, powered_complete
);
4697 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
4699 struct cmd_lookup match
= { NULL
, hdev
};
4700 u8 status_not_powered
= MGMT_STATUS_NOT_POWERED
;
4701 u8 zero_cod
[] = { 0, 0, 0 };
4704 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
4708 if (powered_update_hci(hdev
) == 0)
4711 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
4716 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
4717 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status_not_powered
);
4719 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
4720 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
4721 zero_cod
, sizeof(zero_cod
), NULL
);
4724 err
= new_settings(hdev
, match
.sk
);
4732 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
4734 struct pending_cmd
*cmd
;
4737 cmd
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
4741 if (err
== -ERFKILL
)
4742 status
= MGMT_STATUS_RFKILLED
;
4744 status
= MGMT_STATUS_FAILED
;
4746 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
4748 mgmt_pending_remove(cmd
);
4751 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
4753 struct hci_request req
;
4757 /* When discoverable timeout triggers, then just make sure
4758 * the limited discoverable flag is cleared. Even in the case
4759 * of a timeout triggered from general discoverable, it is
4760 * safe to unconditionally clear the flag.
4762 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
4763 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4765 hci_req_init(&req
, hdev
);
4766 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4767 u8 scan
= SCAN_PAGE
;
4768 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
4769 sizeof(scan
), &scan
);
4772 update_adv_data(&req
);
4773 hci_req_run(&req
, NULL
);
4775 hdev
->discov_timeout
= 0;
4777 new_settings(hdev
, NULL
);
4779 hci_dev_unlock(hdev
);
4782 void mgmt_discoverable(struct hci_dev
*hdev
, u8 discoverable
)
4786 /* Nothing needed here if there's a pending command since that
4787 * commands request completion callback takes care of everything
4790 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
))
4794 changed
= !test_and_set_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4796 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
4797 changed
= test_and_clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4801 struct hci_request req
;
4803 /* In case this change in discoverable was triggered by
4804 * a disabling of connectable there could be a need to
4805 * update the advertising flags.
4807 hci_req_init(&req
, hdev
);
4808 update_adv_data(&req
);
4809 hci_req_run(&req
, NULL
);
4811 new_settings(hdev
, NULL
);
4815 void mgmt_connectable(struct hci_dev
*hdev
, u8 connectable
)
4819 /* Nothing needed here if there's a pending command since that
4820 * commands request completion callback takes care of everything
4823 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
))
4827 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
4829 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
4832 new_settings(hdev
, NULL
);
4835 void mgmt_advertising(struct hci_dev
*hdev
, u8 advertising
)
4838 set_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4840 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4843 void mgmt_write_scan_failed(struct hci_dev
*hdev
, u8 scan
, u8 status
)
4845 u8 mgmt_err
= mgmt_status(status
);
4847 if (scan
& SCAN_PAGE
)
4848 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE
, hdev
,
4849 cmd_status_rsp
, &mgmt_err
);
4851 if (scan
& SCAN_INQUIRY
)
4852 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE
, hdev
,
4853 cmd_status_rsp
, &mgmt_err
);
4856 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
4859 struct mgmt_ev_new_link_key ev
;
4861 memset(&ev
, 0, sizeof(ev
));
4863 ev
.store_hint
= persistent
;
4864 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
4865 ev
.key
.addr
.type
= BDADDR_BREDR
;
4866 ev
.key
.type
= key
->type
;
4867 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
4868 ev
.key
.pin_len
= key
->pin_len
;
4870 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
4873 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
)
4875 struct mgmt_ev_new_long_term_key ev
;
4877 memset(&ev
, 0, sizeof(ev
));
4879 /* Devices using resolvable or non-resolvable random addresses
4880 * without providing an indentity resolving key don't require
4881 * to store long term keys. Their addresses will change the
4884 * Only when a remote device provides an identity address
4885 * make sure the long term key is stored. If the remote
4886 * identity is known, the long term keys are internally
4887 * mapped to the identity address. So allow static random
4888 * and public addresses here.
4890 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
4891 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4892 ev
.store_hint
= 0x00;
4894 ev
.store_hint
= 0x01;
4896 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
4897 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
4898 ev
.key
.type
= key
->authenticated
;
4899 ev
.key
.enc_size
= key
->enc_size
;
4900 ev
.key
.ediv
= key
->ediv
;
4902 if (key
->type
== HCI_SMP_LTK
)
4905 memcpy(ev
.key
.rand
, key
->rand
, sizeof(key
->rand
));
4906 memcpy(ev
.key
.val
, key
->val
, sizeof(key
->val
));
4908 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
4911 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
)
4913 struct mgmt_ev_new_irk ev
;
4915 memset(&ev
, 0, sizeof(ev
));
4917 /* For identity resolving keys from devices that are already
4918 * using a public address or static random address, do not
4919 * ask for storing this key. The identity resolving key really
4920 * is only mandatory for devices using resovlable random
4923 * Storing all identity resolving keys has the downside that
4924 * they will be also loaded on next boot of they system. More
4925 * identity resolving keys, means more time during scanning is
4926 * needed to actually resolve these addresses.
4928 if (bacmp(&irk
->rpa
, BDADDR_ANY
))
4929 ev
.store_hint
= 0x01;
4931 ev
.store_hint
= 0x00;
4933 bacpy(&ev
.rpa
, &irk
->rpa
);
4934 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
4935 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
4936 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
4938 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
4941 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
4944 eir
[eir_len
++] = sizeof(type
) + data_len
;
4945 eir
[eir_len
++] = type
;
4946 memcpy(&eir
[eir_len
], data
, data_len
);
4947 eir_len
+= data_len
;
4952 void mgmt_device_connected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
4953 u8 addr_type
, u32 flags
, u8
*name
, u8 name_len
,
4957 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
4960 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
4961 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
4963 ev
->flags
= __cpu_to_le32(flags
);
4966 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
4969 if (dev_class
&& memcmp(dev_class
, "\0\0\0", 3) != 0)
4970 eir_len
= eir_append_data(ev
->eir
, eir_len
,
4971 EIR_CLASS_OF_DEV
, dev_class
, 3);
4973 ev
->eir_len
= cpu_to_le16(eir_len
);
4975 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
4976 sizeof(*ev
) + eir_len
, NULL
);
4979 static void disconnect_rsp(struct pending_cmd
*cmd
, void *data
)
4981 struct mgmt_cp_disconnect
*cp
= cmd
->param
;
4982 struct sock
**sk
= data
;
4983 struct mgmt_rp_disconnect rp
;
4985 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4986 rp
.addr
.type
= cp
->addr
.type
;
4988 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
, 0, &rp
,
4994 mgmt_pending_remove(cmd
);
4997 static void unpair_device_rsp(struct pending_cmd
*cmd
, void *data
)
4999 struct hci_dev
*hdev
= data
;
5000 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
5001 struct mgmt_rp_unpair_device rp
;
5003 memset(&rp
, 0, sizeof(rp
));
5004 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5005 rp
.addr
.type
= cp
->addr
.type
;
5007 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
5009 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, 0, &rp
, sizeof(rp
));
5011 mgmt_pending_remove(cmd
);
5014 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5015 u8 link_type
, u8 addr_type
, u8 reason
)
5017 struct mgmt_ev_device_disconnected ev
;
5018 struct sock
*sk
= NULL
;
5020 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
5023 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
5025 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5026 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5029 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
5034 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
5038 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5039 u8 link_type
, u8 addr_type
, u8 status
)
5041 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
5042 struct mgmt_cp_disconnect
*cp
;
5043 struct mgmt_rp_disconnect rp
;
5044 struct pending_cmd
*cmd
;
5046 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
5049 cmd
= mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
);
5055 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
5058 if (cp
->addr
.type
!= bdaddr_type
)
5061 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5062 rp
.addr
.type
= bdaddr_type
;
5064 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
,
5065 mgmt_status(status
), &rp
, sizeof(rp
));
5067 mgmt_pending_remove(cmd
);
5070 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5071 u8 addr_type
, u8 status
)
5073 struct mgmt_ev_connect_failed ev
;
5075 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5076 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5077 ev
.status
= mgmt_status(status
);
5079 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
5082 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
5084 struct mgmt_ev_pin_code_request ev
;
5086 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5087 ev
.addr
.type
= BDADDR_BREDR
;
5090 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
5093 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5096 struct pending_cmd
*cmd
;
5097 struct mgmt_rp_pin_code_reply rp
;
5099 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
5103 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5104 rp
.addr
.type
= BDADDR_BREDR
;
5106 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
5107 mgmt_status(status
), &rp
, sizeof(rp
));
5109 mgmt_pending_remove(cmd
);
5112 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5115 struct pending_cmd
*cmd
;
5116 struct mgmt_rp_pin_code_reply rp
;
5118 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
5122 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5123 rp
.addr
.type
= BDADDR_BREDR
;
5125 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_NEG_REPLY
,
5126 mgmt_status(status
), &rp
, sizeof(rp
));
5128 mgmt_pending_remove(cmd
);
5131 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5132 u8 link_type
, u8 addr_type
, __le32 value
,
5135 struct mgmt_ev_user_confirm_request ev
;
5137 BT_DBG("%s", hdev
->name
);
5139 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5140 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5141 ev
.confirm_hint
= confirm_hint
;
5144 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
5148 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5149 u8 link_type
, u8 addr_type
)
5151 struct mgmt_ev_user_passkey_request ev
;
5153 BT_DBG("%s", hdev
->name
);
5155 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5156 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5158 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
5162 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5163 u8 link_type
, u8 addr_type
, u8 status
,
5166 struct pending_cmd
*cmd
;
5167 struct mgmt_rp_user_confirm_reply rp
;
5170 cmd
= mgmt_pending_find(opcode
, hdev
);
5174 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5175 rp
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5176 err
= cmd_complete(cmd
->sk
, hdev
->id
, opcode
, mgmt_status(status
),
5179 mgmt_pending_remove(cmd
);
5184 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5185 u8 link_type
, u8 addr_type
, u8 status
)
5187 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5188 status
, MGMT_OP_USER_CONFIRM_REPLY
);
5191 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5192 u8 link_type
, u8 addr_type
, u8 status
)
5194 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5196 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
5199 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5200 u8 link_type
, u8 addr_type
, u8 status
)
5202 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5203 status
, MGMT_OP_USER_PASSKEY_REPLY
);
5206 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5207 u8 link_type
, u8 addr_type
, u8 status
)
5209 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5211 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
5214 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5215 u8 link_type
, u8 addr_type
, u32 passkey
,
5218 struct mgmt_ev_passkey_notify ev
;
5220 BT_DBG("%s", hdev
->name
);
5222 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5223 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5224 ev
.passkey
= __cpu_to_le32(passkey
);
5225 ev
.entered
= entered
;
5227 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
5230 void mgmt_auth_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5231 u8 addr_type
, u8 status
)
5233 struct mgmt_ev_auth_failed ev
;
5235 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5236 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5237 ev
.status
= mgmt_status(status
);
5239 mgmt_event(MGMT_EV_AUTH_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
5242 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
5244 struct cmd_lookup match
= { NULL
, hdev
};
5248 u8 mgmt_err
= mgmt_status(status
);
5249 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
5250 cmd_status_rsp
, &mgmt_err
);
5254 if (test_bit(HCI_AUTH
, &hdev
->flags
))
5255 changed
= !test_and_set_bit(HCI_LINK_SECURITY
,
5258 changed
= test_and_clear_bit(HCI_LINK_SECURITY
,
5261 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
5265 new_settings(hdev
, match
.sk
);
5271 static void clear_eir(struct hci_request
*req
)
5273 struct hci_dev
*hdev
= req
->hdev
;
5274 struct hci_cp_write_eir cp
;
5276 if (!lmp_ext_inq_capable(hdev
))
5279 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
5281 memset(&cp
, 0, sizeof(cp
));
5283 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
5286 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
5288 struct cmd_lookup match
= { NULL
, hdev
};
5289 struct hci_request req
;
5290 bool changed
= false;
5293 u8 mgmt_err
= mgmt_status(status
);
5295 if (enable
&& test_and_clear_bit(HCI_SSP_ENABLED
,
5296 &hdev
->dev_flags
)) {
5297 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
5298 new_settings(hdev
, NULL
);
5301 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
5307 changed
= !test_and_set_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
5309 changed
= test_and_clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
5311 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
5314 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
5317 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
5320 new_settings(hdev
, match
.sk
);
5325 hci_req_init(&req
, hdev
);
5327 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
5332 hci_req_run(&req
, NULL
);
5335 void mgmt_sc_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
5337 struct cmd_lookup match
= { NULL
, hdev
};
5338 bool changed
= false;
5341 u8 mgmt_err
= mgmt_status(status
);
5344 if (test_and_clear_bit(HCI_SC_ENABLED
,
5346 new_settings(hdev
, NULL
);
5347 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
5350 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
5351 cmd_status_rsp
, &mgmt_err
);
5356 changed
= !test_and_set_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
5358 changed
= test_and_clear_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
5359 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
5362 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
5363 settings_rsp
, &match
);
5366 new_settings(hdev
, match
.sk
);
5372 static void sk_lookup(struct pending_cmd
*cmd
, void *data
)
5374 struct cmd_lookup
*match
= data
;
5376 if (match
->sk
== NULL
) {
5377 match
->sk
= cmd
->sk
;
5378 sock_hold(match
->sk
);
5382 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
5385 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
5387 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
5388 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
5389 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
5392 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
, 3,
5399 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
5401 struct mgmt_cp_set_local_name ev
;
5402 struct pending_cmd
*cmd
;
5407 memset(&ev
, 0, sizeof(ev
));
5408 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
5409 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
5411 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
5413 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
5415 /* If this is a HCI command related to powering on the
5416 * HCI dev don't send any mgmt signals.
5418 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5422 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
5423 cmd
? cmd
->sk
: NULL
);
5426 void mgmt_read_local_oob_data_complete(struct hci_dev
*hdev
, u8
*hash192
,
5427 u8
*randomizer192
, u8
*hash256
,
5428 u8
*randomizer256
, u8 status
)
5430 struct pending_cmd
*cmd
;
5432 BT_DBG("%s status %u", hdev
->name
, status
);
5434 cmd
= mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
5439 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
5440 mgmt_status(status
));
5442 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
5443 hash256
&& randomizer256
) {
5444 struct mgmt_rp_read_local_oob_ext_data rp
;
5446 memcpy(rp
.hash192
, hash192
, sizeof(rp
.hash192
));
5447 memcpy(rp
.randomizer192
, randomizer192
,
5448 sizeof(rp
.randomizer192
));
5450 memcpy(rp
.hash256
, hash256
, sizeof(rp
.hash256
));
5451 memcpy(rp
.randomizer256
, randomizer256
,
5452 sizeof(rp
.randomizer256
));
5454 cmd_complete(cmd
->sk
, hdev
->id
,
5455 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
5458 struct mgmt_rp_read_local_oob_data rp
;
5460 memcpy(rp
.hash
, hash192
, sizeof(rp
.hash
));
5461 memcpy(rp
.randomizer
, randomizer192
,
5462 sizeof(rp
.randomizer
));
5464 cmd_complete(cmd
->sk
, hdev
->id
,
5465 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
5470 mgmt_pending_remove(cmd
);
5473 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5474 u8 addr_type
, u8
*dev_class
, s8 rssi
, u8 cfm_name
, u8
5475 ssp
, u8
*eir
, u16 eir_len
)
5478 struct mgmt_ev_device_found
*ev
= (void *) buf
;
5479 struct smp_irk
*irk
;
5482 if (!hci_discovery_active(hdev
))
5485 /* Leave 5 bytes for a potential CoD field */
5486 if (sizeof(*ev
) + eir_len
+ 5 > sizeof(buf
))
5489 memset(buf
, 0, sizeof(buf
));
5491 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
5493 bacpy(&ev
->addr
.bdaddr
, &irk
->bdaddr
);
5494 ev
->addr
.type
= link_to_bdaddr(link_type
, irk
->addr_type
);
5496 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
5497 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5502 ev
->flags
|= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME
);
5504 ev
->flags
|= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING
);
5507 memcpy(ev
->eir
, eir
, eir_len
);
5509 if (dev_class
&& !eir_has_data_type(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
))
5510 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
5513 ev
->eir_len
= cpu_to_le16(eir_len
);
5514 ev_size
= sizeof(*ev
) + eir_len
;
5516 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
5519 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5520 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
5522 struct mgmt_ev_device_found
*ev
;
5523 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
5526 ev
= (struct mgmt_ev_device_found
*) buf
;
5528 memset(buf
, 0, sizeof(buf
));
5530 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
5531 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5534 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
5537 ev
->eir_len
= cpu_to_le16(eir_len
);
5539 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
5542 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
5544 struct mgmt_ev_discovering ev
;
5545 struct pending_cmd
*cmd
;
5547 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
5550 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
5552 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
5555 u8 type
= hdev
->discovery
.type
;
5557 cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, 0, &type
,
5559 mgmt_pending_remove(cmd
);
5562 memset(&ev
, 0, sizeof(ev
));
5563 ev
.type
= hdev
->discovery
.type
;
5564 ev
.discovering
= discovering
;
5566 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
5569 int mgmt_device_blocked(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
5571 struct pending_cmd
*cmd
;
5572 struct mgmt_ev_device_blocked ev
;
5574 cmd
= mgmt_pending_find(MGMT_OP_BLOCK_DEVICE
, hdev
);
5576 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5577 ev
.addr
.type
= type
;
5579 return mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &ev
, sizeof(ev
),
5580 cmd
? cmd
->sk
: NULL
);
5583 int mgmt_device_unblocked(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
5585 struct pending_cmd
*cmd
;
5586 struct mgmt_ev_device_unblocked ev
;
5588 cmd
= mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE
, hdev
);
5590 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5591 ev
.addr
.type
= type
;
5593 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &ev
, sizeof(ev
),
5594 cmd
? cmd
->sk
: NULL
);
5597 static void adv_enable_complete(struct hci_dev
*hdev
, u8 status
)
5599 BT_DBG("%s status %u", hdev
->name
, status
);
5601 /* Clear the advertising mgmt setting if we failed to re-enable it */
5603 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5604 new_settings(hdev
, NULL
);
5608 void mgmt_reenable_advertising(struct hci_dev
*hdev
)
5610 struct hci_request req
;
5612 if (hci_conn_num(hdev
, LE_LINK
) > 0)
5615 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
5618 hci_req_init(&req
, hdev
);
5619 enable_advertising(&req
);
5621 /* If this fails we have no option but to let user space know
5622 * that we've disabled advertising.
5624 if (hci_req_run(&req
, adv_enable_complete
) < 0) {
5625 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5626 new_settings(hdev
, NULL
);