/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
39 static const u16 mgmt_commands
[] = {
40 MGMT_OP_READ_INDEX_LIST
,
43 MGMT_OP_SET_DISCOVERABLE
,
44 MGMT_OP_SET_CONNECTABLE
,
45 MGMT_OP_SET_FAST_CONNECTABLE
,
47 MGMT_OP_SET_LINK_SECURITY
,
51 MGMT_OP_SET_DEV_CLASS
,
52 MGMT_OP_SET_LOCAL_NAME
,
55 MGMT_OP_LOAD_LINK_KEYS
,
56 MGMT_OP_LOAD_LONG_TERM_KEYS
,
58 MGMT_OP_GET_CONNECTIONS
,
59 MGMT_OP_PIN_CODE_REPLY
,
60 MGMT_OP_PIN_CODE_NEG_REPLY
,
61 MGMT_OP_SET_IO_CAPABILITY
,
63 MGMT_OP_CANCEL_PAIR_DEVICE
,
64 MGMT_OP_UNPAIR_DEVICE
,
65 MGMT_OP_USER_CONFIRM_REPLY
,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
67 MGMT_OP_USER_PASSKEY_REPLY
,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
69 MGMT_OP_READ_LOCAL_OOB_DATA
,
70 MGMT_OP_ADD_REMOTE_OOB_DATA
,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
72 MGMT_OP_START_DISCOVERY
,
73 MGMT_OP_STOP_DISCOVERY
,
76 MGMT_OP_UNBLOCK_DEVICE
,
77 MGMT_OP_SET_DEVICE_ID
,
78 MGMT_OP_SET_ADVERTISING
,
80 MGMT_OP_SET_STATIC_ADDRESS
,
81 MGMT_OP_SET_SCAN_PARAMS
,
82 MGMT_OP_SET_SECURE_CONN
,
83 MGMT_OP_SET_DEBUG_KEYS
,
88 static const u16 mgmt_events
[] = {
89 MGMT_EV_CONTROLLER_ERROR
,
91 MGMT_EV_INDEX_REMOVED
,
93 MGMT_EV_CLASS_OF_DEV_CHANGED
,
94 MGMT_EV_LOCAL_NAME_CHANGED
,
96 MGMT_EV_NEW_LONG_TERM_KEY
,
97 MGMT_EV_DEVICE_CONNECTED
,
98 MGMT_EV_DEVICE_DISCONNECTED
,
99 MGMT_EV_CONNECT_FAILED
,
100 MGMT_EV_PIN_CODE_REQUEST
,
101 MGMT_EV_USER_CONFIRM_REQUEST
,
102 MGMT_EV_USER_PASSKEY_REQUEST
,
104 MGMT_EV_DEVICE_FOUND
,
106 MGMT_EV_DEVICE_BLOCKED
,
107 MGMT_EV_DEVICE_UNBLOCKED
,
108 MGMT_EV_DEVICE_UNPAIRED
,
109 MGMT_EV_PASSKEY_NOTIFY
,
113 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
115 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
116 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
119 struct list_head list
;
127 /* HCI to MGMT error code conversion table */
128 static u8 mgmt_status_table
[] = {
130 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
131 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
132 MGMT_STATUS_FAILED
, /* Hardware Failure */
133 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
134 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
135 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
136 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
137 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
138 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
139 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
140 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
141 MGMT_STATUS_BUSY
, /* Command Disallowed */
142 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
143 MGMT_STATUS_REJECTED
, /* Rejected Security */
144 MGMT_STATUS_REJECTED
, /* Rejected Personal */
145 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
146 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
147 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
148 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
149 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
150 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
151 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
152 MGMT_STATUS_BUSY
, /* Repeated Attempts */
153 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
154 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
155 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
156 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
157 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
158 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
159 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
160 MGMT_STATUS_FAILED
, /* Unspecified Error */
161 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
162 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
163 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
164 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
165 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
166 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
167 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
168 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
169 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
170 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
171 MGMT_STATUS_FAILED
, /* Transaction Collision */
172 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
173 MGMT_STATUS_REJECTED
, /* QoS Rejected */
174 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
175 MGMT_STATUS_REJECTED
, /* Insufficient Security */
176 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
177 MGMT_STATUS_BUSY
, /* Role Switch Pending */
178 MGMT_STATUS_FAILED
, /* Slot Violation */
179 MGMT_STATUS_FAILED
, /* Role Switch Failed */
180 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
181 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
182 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
183 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
184 MGMT_STATUS_BUSY
, /* Controller Busy */
185 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
186 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
187 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
188 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
189 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
192 static u8
mgmt_status(u8 hci_status
)
194 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
195 return mgmt_status_table
[hci_status
];
197 return MGMT_STATUS_FAILED
;
200 static int cmd_status(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
)
203 struct mgmt_hdr
*hdr
;
204 struct mgmt_ev_cmd_status
*ev
;
207 BT_DBG("sock %p, index %u, cmd %u, status %u", sk
, index
, cmd
, status
);
209 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
), GFP_KERNEL
);
213 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
215 hdr
->opcode
= __constant_cpu_to_le16(MGMT_EV_CMD_STATUS
);
216 hdr
->index
= cpu_to_le16(index
);
217 hdr
->len
= cpu_to_le16(sizeof(*ev
));
219 ev
= (void *) skb_put(skb
, sizeof(*ev
));
221 ev
->opcode
= cpu_to_le16(cmd
);
223 err
= sock_queue_rcv_skb(sk
, skb
);
230 static int cmd_complete(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
,
231 void *rp
, size_t rp_len
)
234 struct mgmt_hdr
*hdr
;
235 struct mgmt_ev_cmd_complete
*ev
;
238 BT_DBG("sock %p", sk
);
240 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
) + rp_len
, GFP_KERNEL
);
244 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
246 hdr
->opcode
= __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE
);
247 hdr
->index
= cpu_to_le16(index
);
248 hdr
->len
= cpu_to_le16(sizeof(*ev
) + rp_len
);
250 ev
= (void *) skb_put(skb
, sizeof(*ev
) + rp_len
);
251 ev
->opcode
= cpu_to_le16(cmd
);
255 memcpy(ev
->data
, rp
, rp_len
);
257 err
= sock_queue_rcv_skb(sk
, skb
);
264 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
267 struct mgmt_rp_read_version rp
;
269 BT_DBG("sock %p", sk
);
271 rp
.version
= MGMT_VERSION
;
272 rp
.revision
= __constant_cpu_to_le16(MGMT_REVISION
);
274 return cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0, &rp
,
278 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
281 struct mgmt_rp_read_commands
*rp
;
282 const u16 num_commands
= ARRAY_SIZE(mgmt_commands
);
283 const u16 num_events
= ARRAY_SIZE(mgmt_events
);
288 BT_DBG("sock %p", sk
);
290 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
292 rp
= kmalloc(rp_size
, GFP_KERNEL
);
296 rp
->num_commands
= __constant_cpu_to_le16(num_commands
);
297 rp
->num_events
= __constant_cpu_to_le16(num_events
);
299 for (i
= 0, opcode
= rp
->opcodes
; i
< num_commands
; i
++, opcode
++)
300 put_unaligned_le16(mgmt_commands
[i
], opcode
);
302 for (i
= 0; i
< num_events
; i
++, opcode
++)
303 put_unaligned_le16(mgmt_events
[i
], opcode
);
305 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0, rp
,
312 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
315 struct mgmt_rp_read_index_list
*rp
;
321 BT_DBG("sock %p", sk
);
323 read_lock(&hci_dev_list_lock
);
326 list_for_each_entry(d
, &hci_dev_list
, list
) {
327 if (d
->dev_type
== HCI_BREDR
)
331 rp_len
= sizeof(*rp
) + (2 * count
);
332 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
334 read_unlock(&hci_dev_list_lock
);
339 list_for_each_entry(d
, &hci_dev_list
, list
) {
340 if (test_bit(HCI_SETUP
, &d
->dev_flags
))
343 if (test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
346 if (d
->dev_type
== HCI_BREDR
) {
347 rp
->index
[count
++] = cpu_to_le16(d
->id
);
348 BT_DBG("Added hci%u", d
->id
);
352 rp
->num_controllers
= cpu_to_le16(count
);
353 rp_len
= sizeof(*rp
) + (2 * count
);
355 read_unlock(&hci_dev_list_lock
);
357 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
, 0, rp
,
365 static u32
get_supported_settings(struct hci_dev
*hdev
)
369 settings
|= MGMT_SETTING_POWERED
;
370 settings
|= MGMT_SETTING_PAIRABLE
;
371 settings
|= MGMT_SETTING_DEBUG_KEYS
;
373 if (lmp_bredr_capable(hdev
)) {
374 settings
|= MGMT_SETTING_CONNECTABLE
;
375 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
376 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
377 settings
|= MGMT_SETTING_DISCOVERABLE
;
378 settings
|= MGMT_SETTING_BREDR
;
379 settings
|= MGMT_SETTING_LINK_SECURITY
;
381 if (lmp_ssp_capable(hdev
)) {
382 settings
|= MGMT_SETTING_SSP
;
383 settings
|= MGMT_SETTING_HS
;
386 if (lmp_sc_capable(hdev
) ||
387 test_bit(HCI_FORCE_SC
, &hdev
->dev_flags
))
388 settings
|= MGMT_SETTING_SECURE_CONN
;
391 if (lmp_le_capable(hdev
)) {
392 settings
|= MGMT_SETTING_LE
;
393 settings
|= MGMT_SETTING_ADVERTISING
;
394 settings
|= MGMT_SETTING_PRIVACY
;
400 static u32
get_current_settings(struct hci_dev
*hdev
)
404 if (hdev_is_powered(hdev
))
405 settings
|= MGMT_SETTING_POWERED
;
407 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
408 settings
|= MGMT_SETTING_CONNECTABLE
;
410 if (test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
411 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
413 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
414 settings
|= MGMT_SETTING_DISCOVERABLE
;
416 if (test_bit(HCI_PAIRABLE
, &hdev
->dev_flags
))
417 settings
|= MGMT_SETTING_PAIRABLE
;
419 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
420 settings
|= MGMT_SETTING_BREDR
;
422 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
423 settings
|= MGMT_SETTING_LE
;
425 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
))
426 settings
|= MGMT_SETTING_LINK_SECURITY
;
428 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
429 settings
|= MGMT_SETTING_SSP
;
431 if (test_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
))
432 settings
|= MGMT_SETTING_HS
;
434 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
435 settings
|= MGMT_SETTING_ADVERTISING
;
437 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
438 settings
|= MGMT_SETTING_SECURE_CONN
;
440 if (test_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
))
441 settings
|= MGMT_SETTING_DEBUG_KEYS
;
443 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
))
444 settings
|= MGMT_SETTING_PRIVACY
;
449 #define PNP_INFO_SVCLASS_ID 0x1200
451 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
453 u8
*ptr
= data
, *uuids_start
= NULL
;
454 struct bt_uuid
*uuid
;
459 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
462 if (uuid
->size
!= 16)
465 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
469 if (uuid16
== PNP_INFO_SVCLASS_ID
)
475 uuids_start
[1] = EIR_UUID16_ALL
;
479 /* Stop if not enough space to put next UUID */
480 if ((ptr
- data
) + sizeof(u16
) > len
) {
481 uuids_start
[1] = EIR_UUID16_SOME
;
485 *ptr
++ = (uuid16
& 0x00ff);
486 *ptr
++ = (uuid16
& 0xff00) >> 8;
487 uuids_start
[0] += sizeof(uuid16
);
493 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
495 u8
*ptr
= data
, *uuids_start
= NULL
;
496 struct bt_uuid
*uuid
;
501 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
502 if (uuid
->size
!= 32)
508 uuids_start
[1] = EIR_UUID32_ALL
;
512 /* Stop if not enough space to put next UUID */
513 if ((ptr
- data
) + sizeof(u32
) > len
) {
514 uuids_start
[1] = EIR_UUID32_SOME
;
518 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
520 uuids_start
[0] += sizeof(u32
);
526 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
528 u8
*ptr
= data
, *uuids_start
= NULL
;
529 struct bt_uuid
*uuid
;
534 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
535 if (uuid
->size
!= 128)
541 uuids_start
[1] = EIR_UUID128_ALL
;
545 /* Stop if not enough space to put next UUID */
546 if ((ptr
- data
) + 16 > len
) {
547 uuids_start
[1] = EIR_UUID128_SOME
;
551 memcpy(ptr
, uuid
->uuid
, 16);
553 uuids_start
[0] += 16;
559 static struct pending_cmd
*mgmt_pending_find(u16 opcode
, struct hci_dev
*hdev
)
561 struct pending_cmd
*cmd
;
563 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
564 if (cmd
->opcode
== opcode
)
571 static u8
create_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
576 name_len
= strlen(hdev
->dev_name
);
578 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
580 if (name_len
> max_len
) {
582 ptr
[1] = EIR_NAME_SHORT
;
584 ptr
[1] = EIR_NAME_COMPLETE
;
586 ptr
[0] = name_len
+ 1;
588 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
590 ad_len
+= (name_len
+ 2);
591 ptr
+= (name_len
+ 2);
597 static void update_scan_rsp_data(struct hci_request
*req
)
599 struct hci_dev
*hdev
= req
->hdev
;
600 struct hci_cp_le_set_scan_rsp_data cp
;
603 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
606 memset(&cp
, 0, sizeof(cp
));
608 len
= create_scan_rsp_data(hdev
, cp
.data
);
610 if (hdev
->scan_rsp_data_len
== len
&&
611 memcmp(cp
.data
, hdev
->scan_rsp_data
, len
) == 0)
614 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
615 hdev
->scan_rsp_data_len
= len
;
619 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
622 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
624 struct pending_cmd
*cmd
;
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
629 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
631 struct mgmt_mode
*cp
= cmd
->param
;
633 return LE_AD_GENERAL
;
634 else if (cp
->val
== 0x02)
635 return LE_AD_LIMITED
;
637 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
638 return LE_AD_LIMITED
;
639 else if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
640 return LE_AD_GENERAL
;
646 static u8
create_adv_data(struct hci_dev
*hdev
, u8
*ptr
)
648 u8 ad_len
= 0, flags
= 0;
650 flags
|= get_adv_discov_flags(hdev
);
652 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
653 flags
|= LE_AD_NO_BREDR
;
656 BT_DBG("adv flags 0x%02x", flags
);
666 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
668 ptr
[1] = EIR_TX_POWER
;
669 ptr
[2] = (u8
) hdev
->adv_tx_power
;
678 static void update_adv_data(struct hci_request
*req
)
680 struct hci_dev
*hdev
= req
->hdev
;
681 struct hci_cp_le_set_adv_data cp
;
684 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
687 memset(&cp
, 0, sizeof(cp
));
689 len
= create_adv_data(hdev
, cp
.data
);
691 if (hdev
->adv_data_len
== len
&&
692 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
695 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
696 hdev
->adv_data_len
= len
;
700 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
703 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
708 name_len
= strlen(hdev
->dev_name
);
714 ptr
[1] = EIR_NAME_SHORT
;
716 ptr
[1] = EIR_NAME_COMPLETE
;
718 /* EIR Data length */
719 ptr
[0] = name_len
+ 1;
721 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
723 ptr
+= (name_len
+ 2);
726 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
728 ptr
[1] = EIR_TX_POWER
;
729 ptr
[2] = (u8
) hdev
->inq_tx_power
;
734 if (hdev
->devid_source
> 0) {
736 ptr
[1] = EIR_DEVICE_ID
;
738 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
739 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
740 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
741 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
746 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
747 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
748 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
751 static void update_eir(struct hci_request
*req
)
753 struct hci_dev
*hdev
= req
->hdev
;
754 struct hci_cp_write_eir cp
;
756 if (!hdev_is_powered(hdev
))
759 if (!lmp_ext_inq_capable(hdev
))
762 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
765 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
768 memset(&cp
, 0, sizeof(cp
));
770 create_eir(hdev
, cp
.data
);
772 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
775 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
777 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
780 static u8
get_service_classes(struct hci_dev
*hdev
)
782 struct bt_uuid
*uuid
;
785 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
786 val
|= uuid
->svc_hint
;
791 static void update_class(struct hci_request
*req
)
793 struct hci_dev
*hdev
= req
->hdev
;
796 BT_DBG("%s", hdev
->name
);
798 if (!hdev_is_powered(hdev
))
801 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
804 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
807 cod
[0] = hdev
->minor_class
;
808 cod
[1] = hdev
->major_class
;
809 cod
[2] = get_service_classes(hdev
);
811 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
814 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
817 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
820 static bool get_connectable(struct hci_dev
*hdev
)
822 struct pending_cmd
*cmd
;
824 /* If there's a pending mgmt command the flag will not yet have
825 * it's final value, so check for this first.
827 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
829 struct mgmt_mode
*cp
= cmd
->param
;
833 return test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
836 static void enable_advertising(struct hci_request
*req
)
838 struct hci_dev
*hdev
= req
->hdev
;
839 struct hci_cp_le_set_adv_param cp
;
840 u8 own_addr_type
, enable
= 0x01;
843 connectable
= get_connectable(hdev
);
845 /* Set require_privacy to true only when non-connectable
846 * advertising is used. In that case it is fine to use a
847 * non-resolvable private address.
849 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
852 memset(&cp
, 0, sizeof(cp
));
853 cp
.min_interval
= __constant_cpu_to_le16(0x0800);
854 cp
.max_interval
= __constant_cpu_to_le16(0x0800);
855 cp
.type
= connectable
? LE_ADV_IND
: LE_ADV_NONCONN_IND
;
856 cp
.own_address_type
= own_addr_type
;
857 cp
.channel_map
= hdev
->le_adv_channel_map
;
859 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
861 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
864 static void disable_advertising(struct hci_request
*req
)
868 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
871 static void service_cache_off(struct work_struct
*work
)
873 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
875 struct hci_request req
;
877 if (!test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
880 hci_req_init(&req
, hdev
);
887 hci_dev_unlock(hdev
);
889 hci_req_run(&req
, NULL
);
892 static void rpa_expired(struct work_struct
*work
)
894 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
896 struct hci_request req
;
900 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
902 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) ||
903 hci_conn_num(hdev
, LE_LINK
) > 0)
906 /* The generation of a new RPA and programming it into the
907 * controller happens in the enable_advertising() function.
910 hci_req_init(&req
, hdev
);
912 disable_advertising(&req
);
913 enable_advertising(&req
);
915 hci_req_run(&req
, NULL
);
918 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
920 if (test_and_set_bit(HCI_MGMT
, &hdev
->dev_flags
))
923 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
924 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
926 /* Non-mgmt controlled devices get this bit set
927 * implicitly so that pairing works for them, however
928 * for mgmt we require user-space to explicitly enable
931 clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
934 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
935 void *data
, u16 data_len
)
937 struct mgmt_rp_read_info rp
;
939 BT_DBG("sock %p %s", sk
, hdev
->name
);
943 memset(&rp
, 0, sizeof(rp
));
945 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
947 rp
.version
= hdev
->hci_ver
;
948 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
950 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
951 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
953 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
955 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
956 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
958 hci_dev_unlock(hdev
);
960 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
964 static void mgmt_pending_free(struct pending_cmd
*cmd
)
971 static struct pending_cmd
*mgmt_pending_add(struct sock
*sk
, u16 opcode
,
972 struct hci_dev
*hdev
, void *data
,
975 struct pending_cmd
*cmd
;
977 cmd
= kmalloc(sizeof(*cmd
), GFP_KERNEL
);
981 cmd
->opcode
= opcode
;
982 cmd
->index
= hdev
->id
;
984 cmd
->param
= kmalloc(len
, GFP_KERNEL
);
991 memcpy(cmd
->param
, data
, len
);
996 list_add(&cmd
->list
, &hdev
->mgmt_pending
);
1001 static void mgmt_pending_foreach(u16 opcode
, struct hci_dev
*hdev
,
1002 void (*cb
)(struct pending_cmd
*cmd
,
1006 struct pending_cmd
*cmd
, *tmp
;
1008 list_for_each_entry_safe(cmd
, tmp
, &hdev
->mgmt_pending
, list
) {
1009 if (opcode
> 0 && cmd
->opcode
!= opcode
)
1016 static void mgmt_pending_remove(struct pending_cmd
*cmd
)
1018 list_del(&cmd
->list
);
1019 mgmt_pending_free(cmd
);
1022 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1024 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1026 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1030 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
)
1032 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1034 if (hci_conn_count(hdev
) == 0)
1035 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1038 static int clean_up_hci_state(struct hci_dev
*hdev
)
1040 struct hci_request req
;
1041 struct hci_conn
*conn
;
1043 hci_req_init(&req
, hdev
);
1045 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1046 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1048 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1051 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1052 disable_advertising(&req
);
1054 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
)) {
1055 hci_req_add_le_scan_disable(&req
);
1058 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1059 struct hci_cp_disconnect dc
;
1061 dc
.handle
= cpu_to_le16(conn
->handle
);
1062 dc
.reason
= 0x15; /* Terminated due to Power Off */
1063 hci_req_add(&req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
1066 return hci_req_run(&req
, clean_up_hci_complete
);
1069 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1072 struct mgmt_mode
*cp
= data
;
1073 struct pending_cmd
*cmd
;
1076 BT_DBG("request for %s", hdev
->name
);
1078 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1079 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1080 MGMT_STATUS_INVALID_PARAMS
);
1084 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1085 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1090 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1091 cancel_delayed_work(&hdev
->power_off
);
1094 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1096 err
= mgmt_powered(hdev
, 1);
1101 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1102 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1106 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1113 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1116 /* Disconnect connections, stop scans, etc */
1117 err
= clean_up_hci_state(hdev
);
1119 /* ENODATA means there were no HCI commands queued */
1120 if (err
== -ENODATA
) {
1121 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1127 hci_dev_unlock(hdev
);
1131 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 data_len
,
1132 struct sock
*skip_sk
)
1134 struct sk_buff
*skb
;
1135 struct mgmt_hdr
*hdr
;
1137 skb
= alloc_skb(sizeof(*hdr
) + data_len
, GFP_KERNEL
);
1141 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
1142 hdr
->opcode
= cpu_to_le16(event
);
1144 hdr
->index
= cpu_to_le16(hdev
->id
);
1146 hdr
->index
= __constant_cpu_to_le16(MGMT_INDEX_NONE
);
1147 hdr
->len
= cpu_to_le16(data_len
);
1150 memcpy(skb_put(skb
, data_len
), data
, data_len
);
1153 __net_timestamp(skb
);
1155 hci_send_to_control(skb
, skip_sk
);
1161 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1165 ev
= cpu_to_le32(get_current_settings(hdev
));
1167 return mgmt_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
, sizeof(ev
), skip
);
1172 struct hci_dev
*hdev
;
1176 static void settings_rsp(struct pending_cmd
*cmd
, void *data
)
1178 struct cmd_lookup
*match
= data
;
1180 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1182 list_del(&cmd
->list
);
1184 if (match
->sk
== NULL
) {
1185 match
->sk
= cmd
->sk
;
1186 sock_hold(match
->sk
);
1189 mgmt_pending_free(cmd
);
1192 static void cmd_status_rsp(struct pending_cmd
*cmd
, void *data
)
1196 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1197 mgmt_pending_remove(cmd
);
1200 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1202 if (!lmp_bredr_capable(hdev
))
1203 return MGMT_STATUS_NOT_SUPPORTED
;
1204 else if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1205 return MGMT_STATUS_REJECTED
;
1207 return MGMT_STATUS_SUCCESS
;
1210 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1212 if (!lmp_le_capable(hdev
))
1213 return MGMT_STATUS_NOT_SUPPORTED
;
1214 else if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
1215 return MGMT_STATUS_REJECTED
;
1217 return MGMT_STATUS_SUCCESS
;
1220 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1222 struct pending_cmd
*cmd
;
1223 struct mgmt_mode
*cp
;
1224 struct hci_request req
;
1227 BT_DBG("status 0x%02x", status
);
1231 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1236 u8 mgmt_err
= mgmt_status(status
);
1237 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1238 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1244 changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
1247 if (hdev
->discov_timeout
> 0) {
1248 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1249 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1253 changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1257 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1260 new_settings(hdev
, cmd
->sk
);
1262 /* When the discoverable mode gets changed, make sure
1263 * that class of device has the limited discoverable
1264 * bit correctly set.
1266 hci_req_init(&req
, hdev
);
1268 hci_req_run(&req
, NULL
);
1271 mgmt_pending_remove(cmd
);
1274 hci_dev_unlock(hdev
);
1277 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1280 struct mgmt_cp_set_discoverable
*cp
= data
;
1281 struct pending_cmd
*cmd
;
1282 struct hci_request req
;
1287 BT_DBG("request for %s", hdev
->name
);
1289 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1290 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1291 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1292 MGMT_STATUS_REJECTED
);
1294 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1295 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1296 MGMT_STATUS_INVALID_PARAMS
);
1298 timeout
= __le16_to_cpu(cp
->timeout
);
1300 /* Disabling discoverable requires that no timeout is set,
1301 * and enabling limited discoverable requires a timeout.
1303 if ((cp
->val
== 0x00 && timeout
> 0) ||
1304 (cp
->val
== 0x02 && timeout
== 0))
1305 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1306 MGMT_STATUS_INVALID_PARAMS
);
1310 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1311 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1312 MGMT_STATUS_NOT_POWERED
);
1316 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1317 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1318 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1323 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
)) {
1324 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1325 MGMT_STATUS_REJECTED
);
1329 if (!hdev_is_powered(hdev
)) {
1330 bool changed
= false;
1332 /* Setting limited discoverable when powered off is
1333 * not a valid operation since it requires a timeout
1334 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1336 if (!!cp
->val
!= test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
)) {
1337 change_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1341 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1346 err
= new_settings(hdev
, sk
);
1351 /* If the current mode is the same, then just update the timeout
1352 * value with the new value. And if only the timeout gets updated,
1353 * then no need for any HCI transactions.
1355 if (!!cp
->val
== test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
) &&
1356 (cp
->val
== 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE
,
1357 &hdev
->dev_flags
)) {
1358 cancel_delayed_work(&hdev
->discov_off
);
1359 hdev
->discov_timeout
= timeout
;
1361 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1362 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1363 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1367 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1371 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1377 /* Cancel any potential discoverable timeout that might be
1378 * still active and store new timeout value. The arming of
1379 * the timeout happens in the complete handler.
1381 cancel_delayed_work(&hdev
->discov_off
);
1382 hdev
->discov_timeout
= timeout
;
1384 /* Limited discoverable mode */
1385 if (cp
->val
== 0x02)
1386 set_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1388 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1390 hci_req_init(&req
, hdev
);
1392 /* The procedure for LE-only controllers is much simpler - just
1393 * update the advertising data.
1395 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1401 struct hci_cp_write_current_iac_lap hci_cp
;
1403 if (cp
->val
== 0x02) {
1404 /* Limited discoverable mode */
1405 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1406 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1407 hci_cp
.iac_lap
[1] = 0x8b;
1408 hci_cp
.iac_lap
[2] = 0x9e;
1409 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1410 hci_cp
.iac_lap
[4] = 0x8b;
1411 hci_cp
.iac_lap
[5] = 0x9e;
1413 /* General discoverable mode */
1415 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1416 hci_cp
.iac_lap
[1] = 0x8b;
1417 hci_cp
.iac_lap
[2] = 0x9e;
1420 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1421 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1423 scan
|= SCAN_INQUIRY
;
1425 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1428 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1431 update_adv_data(&req
);
1433 err
= hci_req_run(&req
, set_discoverable_complete
);
1435 mgmt_pending_remove(cmd
);
1438 hci_dev_unlock(hdev
);
1442 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1444 struct hci_dev
*hdev
= req
->hdev
;
1445 struct hci_cp_write_page_scan_activity acp
;
1448 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1451 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1455 type
= PAGE_SCAN_TYPE_INTERLACED
;
1457 /* 160 msec page scan interval */
1458 acp
.interval
= __constant_cpu_to_le16(0x0100);
1460 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1462 /* default 1.28 sec page scan */
1463 acp
.interval
= __constant_cpu_to_le16(0x0800);
1466 acp
.window
= __constant_cpu_to_le16(0x0012);
1468 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1469 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1470 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
1473 if (hdev
->page_scan_type
!= type
)
1474 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
1477 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1479 struct pending_cmd
*cmd
;
1480 struct mgmt_mode
*cp
;
1483 BT_DBG("status 0x%02x", status
);
1487 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1492 u8 mgmt_err
= mgmt_status(status
);
1493 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1499 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1501 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1503 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1506 new_settings(hdev
, cmd
->sk
);
1509 mgmt_pending_remove(cmd
);
1512 hci_dev_unlock(hdev
);
1515 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1516 struct sock
*sk
, u8 val
)
1518 bool changed
= false;
1521 if (!!val
!= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
1525 set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1527 clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1528 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1531 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1536 return new_settings(hdev
, sk
);
1541 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1544 struct mgmt_mode
*cp
= data
;
1545 struct pending_cmd
*cmd
;
1546 struct hci_request req
;
1550 BT_DBG("request for %s", hdev
->name
);
1552 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1553 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1554 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1555 MGMT_STATUS_REJECTED
);
1557 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1558 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1559 MGMT_STATUS_INVALID_PARAMS
);
1563 if (!hdev_is_powered(hdev
)) {
1564 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1568 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1569 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1570 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1575 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1581 hci_req_init(&req
, hdev
);
1583 /* If BR/EDR is not enabled and we disable advertising as a
1584 * by-product of disabling connectable, we need to update the
1585 * advertising flags.
1587 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1589 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1590 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1592 update_adv_data(&req
);
1593 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1599 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
1600 hdev
->discov_timeout
> 0)
1601 cancel_delayed_work(&hdev
->discov_off
);
1604 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1607 /* If we're going from non-connectable to connectable or
1608 * vice-versa when fast connectable is enabled ensure that fast
1609 * connectable gets disabled. write_fast_connectable won't do
1610 * anything if the page scan parameters are already what they
1613 if (cp
->val
|| test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
1614 write_fast_connectable(&req
, false);
1616 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) &&
1617 hci_conn_num(hdev
, LE_LINK
) == 0) {
1618 disable_advertising(&req
);
1619 enable_advertising(&req
);
1622 err
= hci_req_run(&req
, set_connectable_complete
);
1624 mgmt_pending_remove(cmd
);
1625 if (err
== -ENODATA
)
1626 err
= set_connectable_update_settings(hdev
, sk
,
1632 hci_dev_unlock(hdev
);
1636 static int set_pairable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1639 struct mgmt_mode
*cp
= data
;
1643 BT_DBG("request for %s", hdev
->name
);
1645 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1646 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PAIRABLE
,
1647 MGMT_STATUS_INVALID_PARAMS
);
1652 changed
= !test_and_set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1654 changed
= test_and_clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1656 err
= send_settings_rsp(sk
, MGMT_OP_SET_PAIRABLE
, hdev
);
1661 err
= new_settings(hdev
, sk
);
1664 hci_dev_unlock(hdev
);
1668 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1671 struct mgmt_mode
*cp
= data
;
1672 struct pending_cmd
*cmd
;
1676 BT_DBG("request for %s", hdev
->name
);
1678 status
= mgmt_bredr_support(hdev
);
1680 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1683 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1684 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1685 MGMT_STATUS_INVALID_PARAMS
);
1689 if (!hdev_is_powered(hdev
)) {
1690 bool changed
= false;
1692 if (!!cp
->val
!= test_bit(HCI_LINK_SECURITY
,
1693 &hdev
->dev_flags
)) {
1694 change_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
1698 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1703 err
= new_settings(hdev
, sk
);
1708 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1709 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1716 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1717 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1721 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1727 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1729 mgmt_pending_remove(cmd
);
1734 hci_dev_unlock(hdev
);
1738 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1740 struct mgmt_mode
*cp
= data
;
1741 struct pending_cmd
*cmd
;
1745 BT_DBG("request for %s", hdev
->name
);
1747 status
= mgmt_bredr_support(hdev
);
1749 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1751 if (!lmp_ssp_capable(hdev
))
1752 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1753 MGMT_STATUS_NOT_SUPPORTED
);
1755 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1756 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1757 MGMT_STATUS_INVALID_PARAMS
);
1761 if (!hdev_is_powered(hdev
)) {
1765 changed
= !test_and_set_bit(HCI_SSP_ENABLED
,
1768 changed
= test_and_clear_bit(HCI_SSP_ENABLED
,
1771 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
1774 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1777 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1782 err
= new_settings(hdev
, sk
);
1787 if (mgmt_pending_find(MGMT_OP_SET_SSP
, hdev
) ||
1788 mgmt_pending_find(MGMT_OP_SET_HS
, hdev
)) {
1789 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1794 if (!!cp
->val
== test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
1795 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1799 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
1805 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
1807 mgmt_pending_remove(cmd
);
1812 hci_dev_unlock(hdev
);
1816 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1818 struct mgmt_mode
*cp
= data
;
1823 BT_DBG("request for %s", hdev
->name
);
1825 status
= mgmt_bredr_support(hdev
);
1827 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
1829 if (!lmp_ssp_capable(hdev
))
1830 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1831 MGMT_STATUS_NOT_SUPPORTED
);
1833 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
1834 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1835 MGMT_STATUS_REJECTED
);
1837 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1838 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1839 MGMT_STATUS_INVALID_PARAMS
);
1844 changed
= !test_and_set_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1846 if (hdev_is_powered(hdev
)) {
1847 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1848 MGMT_STATUS_REJECTED
);
1852 changed
= test_and_clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1855 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
1860 err
= new_settings(hdev
, sk
);
1863 hci_dev_unlock(hdev
);
1867 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
)
1869 struct cmd_lookup match
= { NULL
, hdev
};
1872 u8 mgmt_err
= mgmt_status(status
);
1874 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
1879 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
1881 new_settings(hdev
, match
.sk
);
1886 /* Make sure the controller has a good default for
1887 * advertising data. Restrict the update to when LE
1888 * has actually been enabled. During power on, the
1889 * update in powered_update_hci will take care of it.
1891 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1892 struct hci_request req
;
1896 hci_req_init(&req
, hdev
);
1897 update_adv_data(&req
);
1898 update_scan_rsp_data(&req
);
1899 hci_req_run(&req
, NULL
);
1901 hci_dev_unlock(hdev
);
1905 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1907 struct mgmt_mode
*cp
= data
;
1908 struct hci_cp_write_le_host_supported hci_cp
;
1909 struct pending_cmd
*cmd
;
1910 struct hci_request req
;
1914 BT_DBG("request for %s", hdev
->name
);
1916 if (!lmp_le_capable(hdev
))
1917 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1918 MGMT_STATUS_NOT_SUPPORTED
);
1920 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1921 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1922 MGMT_STATUS_INVALID_PARAMS
);
1924 /* LE-only devices do not allow toggling LE on/off */
1925 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1926 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1927 MGMT_STATUS_REJECTED
);
1932 enabled
= lmp_host_le_capable(hdev
);
1934 if (!hdev_is_powered(hdev
) || val
== enabled
) {
1935 bool changed
= false;
1937 if (val
!= test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1938 change_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
1942 if (!val
&& test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
1943 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
1947 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1952 err
= new_settings(hdev
, sk
);
1957 if (mgmt_pending_find(MGMT_OP_SET_LE
, hdev
) ||
1958 mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
1959 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1964 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
1970 hci_req_init(&req
, hdev
);
1972 memset(&hci_cp
, 0, sizeof(hci_cp
));
1976 hci_cp
.simul
= lmp_le_br_capable(hdev
);
1978 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1979 disable_advertising(&req
);
1982 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
1985 err
= hci_req_run(&req
, le_enable_complete
);
1987 mgmt_pending_remove(cmd
);
1990 hci_dev_unlock(hdev
);
1994 /* This is a helper function to test for pending mgmt commands that can
1995 * cause CoD or EIR HCI commands. We can only allow one such pending
1996 * mgmt command at a time since otherwise we cannot easily track what
1997 * the current values are, will be, and based on that calculate if a new
1998 * HCI command needs to be sent and if yes with what value.
2000 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2002 struct pending_cmd
*cmd
;
2004 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2005 switch (cmd
->opcode
) {
2006 case MGMT_OP_ADD_UUID
:
2007 case MGMT_OP_REMOVE_UUID
:
2008 case MGMT_OP_SET_DEV_CLASS
:
2009 case MGMT_OP_SET_POWERED
:
2017 static const u8 bluetooth_base_uuid
[] = {
2018 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2019 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2022 static u8
get_uuid_size(const u8
*uuid
)
2026 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2029 val
= get_unaligned_le32(&uuid
[12]);
2036 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2038 struct pending_cmd
*cmd
;
2042 cmd
= mgmt_pending_find(mgmt_op
, hdev
);
2046 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
2047 hdev
->dev_class
, 3);
2049 mgmt_pending_remove(cmd
);
2052 hci_dev_unlock(hdev
);
2055 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2057 BT_DBG("status 0x%02x", status
);
2059 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2062 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2064 struct mgmt_cp_add_uuid
*cp
= data
;
2065 struct pending_cmd
*cmd
;
2066 struct hci_request req
;
2067 struct bt_uuid
*uuid
;
2070 BT_DBG("request for %s", hdev
->name
);
2074 if (pending_eir_or_class(hdev
)) {
2075 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2080 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2086 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2087 uuid
->svc_hint
= cp
->svc_hint
;
2088 uuid
->size
= get_uuid_size(cp
->uuid
);
2090 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2092 hci_req_init(&req
, hdev
);
2097 err
= hci_req_run(&req
, add_uuid_complete
);
2099 if (err
!= -ENODATA
)
2102 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2103 hdev
->dev_class
, 3);
2107 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2116 hci_dev_unlock(hdev
);
2120 static bool enable_service_cache(struct hci_dev
*hdev
)
2122 if (!hdev_is_powered(hdev
))
2125 if (!test_and_set_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2126 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2134 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2136 BT_DBG("status 0x%02x", status
);
2138 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2141 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2144 struct mgmt_cp_remove_uuid
*cp
= data
;
2145 struct pending_cmd
*cmd
;
2146 struct bt_uuid
*match
, *tmp
;
2147 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2148 struct hci_request req
;
2151 BT_DBG("request for %s", hdev
->name
);
2155 if (pending_eir_or_class(hdev
)) {
2156 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2161 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2162 hci_uuids_clear(hdev
);
2164 if (enable_service_cache(hdev
)) {
2165 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2166 0, hdev
->dev_class
, 3);
2175 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2176 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2179 list_del(&match
->list
);
2185 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2186 MGMT_STATUS_INVALID_PARAMS
);
2191 hci_req_init(&req
, hdev
);
2196 err
= hci_req_run(&req
, remove_uuid_complete
);
2198 if (err
!= -ENODATA
)
2201 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2202 hdev
->dev_class
, 3);
2206 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2215 hci_dev_unlock(hdev
);
2219 static void set_class_complete(struct hci_dev
*hdev
, u8 status
)
2221 BT_DBG("status 0x%02x", status
);
2223 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2226 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2229 struct mgmt_cp_set_dev_class
*cp
= data
;
2230 struct pending_cmd
*cmd
;
2231 struct hci_request req
;
2234 BT_DBG("request for %s", hdev
->name
);
2236 if (!lmp_bredr_capable(hdev
))
2237 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2238 MGMT_STATUS_NOT_SUPPORTED
);
2242 if (pending_eir_or_class(hdev
)) {
2243 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2248 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2249 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2250 MGMT_STATUS_INVALID_PARAMS
);
2254 hdev
->major_class
= cp
->major
;
2255 hdev
->minor_class
= cp
->minor
;
2257 if (!hdev_is_powered(hdev
)) {
2258 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2259 hdev
->dev_class
, 3);
2263 hci_req_init(&req
, hdev
);
2265 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2266 hci_dev_unlock(hdev
);
2267 cancel_delayed_work_sync(&hdev
->service_cache
);
2274 err
= hci_req_run(&req
, set_class_complete
);
2276 if (err
!= -ENODATA
)
2279 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2280 hdev
->dev_class
, 3);
2284 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2293 hci_dev_unlock(hdev
);
2297 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2300 struct mgmt_cp_load_link_keys
*cp
= data
;
2301 u16 key_count
, expected_len
;
2305 BT_DBG("request for %s", hdev
->name
);
2307 if (!lmp_bredr_capable(hdev
))
2308 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2309 MGMT_STATUS_NOT_SUPPORTED
);
2311 key_count
= __le16_to_cpu(cp
->key_count
);
2313 expected_len
= sizeof(*cp
) + key_count
*
2314 sizeof(struct mgmt_link_key_info
);
2315 if (expected_len
!= len
) {
2316 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2318 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2319 MGMT_STATUS_INVALID_PARAMS
);
2322 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2323 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2324 MGMT_STATUS_INVALID_PARAMS
);
2326 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2329 for (i
= 0; i
< key_count
; i
++) {
2330 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2332 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2333 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2334 MGMT_STATUS_INVALID_PARAMS
);
2339 hci_link_keys_clear(hdev
);
2342 changed
= !test_and_set_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
2344 changed
= test_and_clear_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
2347 new_settings(hdev
, NULL
);
2349 for (i
= 0; i
< key_count
; i
++) {
2350 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2352 hci_add_link_key(hdev
, NULL
, 0, &key
->addr
.bdaddr
, key
->val
,
2353 key
->type
, key
->pin_len
);
2356 cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2358 hci_dev_unlock(hdev
);
2363 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2364 u8 addr_type
, struct sock
*skip_sk
)
2366 struct mgmt_ev_device_unpaired ev
;
2368 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2369 ev
.addr
.type
= addr_type
;
2371 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2375 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2378 struct mgmt_cp_unpair_device
*cp
= data
;
2379 struct mgmt_rp_unpair_device rp
;
2380 struct hci_cp_disconnect dc
;
2381 struct pending_cmd
*cmd
;
2382 struct hci_conn
*conn
;
2385 memset(&rp
, 0, sizeof(rp
));
2386 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2387 rp
.addr
.type
= cp
->addr
.type
;
2389 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2390 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2391 MGMT_STATUS_INVALID_PARAMS
,
2394 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2395 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2396 MGMT_STATUS_INVALID_PARAMS
,
2401 if (!hdev_is_powered(hdev
)) {
2402 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2403 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2407 if (cp
->addr
.type
== BDADDR_BREDR
) {
2408 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2412 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2413 addr_type
= ADDR_LE_DEV_PUBLIC
;
2415 addr_type
= ADDR_LE_DEV_RANDOM
;
2417 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2419 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2423 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2424 MGMT_STATUS_NOT_PAIRED
, &rp
, sizeof(rp
));
2428 if (cp
->disconnect
) {
2429 if (cp
->addr
.type
== BDADDR_BREDR
)
2430 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2433 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
2440 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2442 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2446 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2453 dc
.handle
= cpu_to_le16(conn
->handle
);
2454 dc
.reason
= 0x13; /* Remote User Terminated Connection */
2455 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2457 mgmt_pending_remove(cmd
);
2460 hci_dev_unlock(hdev
);
2464 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2467 struct mgmt_cp_disconnect
*cp
= data
;
2468 struct mgmt_rp_disconnect rp
;
2469 struct hci_cp_disconnect dc
;
2470 struct pending_cmd
*cmd
;
2471 struct hci_conn
*conn
;
2476 memset(&rp
, 0, sizeof(rp
));
2477 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2478 rp
.addr
.type
= cp
->addr
.type
;
2480 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2481 return cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2482 MGMT_STATUS_INVALID_PARAMS
,
2487 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2488 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2489 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2493 if (mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2494 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2495 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2499 if (cp
->addr
.type
== BDADDR_BREDR
)
2500 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2503 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
2505 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2506 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2507 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
2511 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2517 dc
.handle
= cpu_to_le16(conn
->handle
);
2518 dc
.reason
= HCI_ERROR_REMOTE_USER_TERM
;
2520 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2522 mgmt_pending_remove(cmd
);
2525 hci_dev_unlock(hdev
);
2529 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2531 switch (link_type
) {
2533 switch (addr_type
) {
2534 case ADDR_LE_DEV_PUBLIC
:
2535 return BDADDR_LE_PUBLIC
;
2538 /* Fallback to LE Random address type */
2539 return BDADDR_LE_RANDOM
;
2543 /* Fallback to BR/EDR type */
2544 return BDADDR_BREDR
;
2548 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2551 struct mgmt_rp_get_connections
*rp
;
2561 if (!hdev_is_powered(hdev
)) {
2562 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2563 MGMT_STATUS_NOT_POWERED
);
2568 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2569 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2573 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2574 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2581 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2582 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2584 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2585 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2586 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2591 rp
->conn_count
= cpu_to_le16(i
);
2593 /* Recalculate length in case of filtered SCO connections, etc */
2594 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2596 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2602 hci_dev_unlock(hdev
);
2606 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2607 struct mgmt_cp_pin_code_neg_reply
*cp
)
2609 struct pending_cmd
*cmd
;
2612 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2617 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2618 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2620 mgmt_pending_remove(cmd
);
2625 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2628 struct hci_conn
*conn
;
2629 struct mgmt_cp_pin_code_reply
*cp
= data
;
2630 struct hci_cp_pin_code_reply reply
;
2631 struct pending_cmd
*cmd
;
2638 if (!hdev_is_powered(hdev
)) {
2639 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2640 MGMT_STATUS_NOT_POWERED
);
2644 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2646 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2647 MGMT_STATUS_NOT_CONNECTED
);
2651 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2652 struct mgmt_cp_pin_code_neg_reply ncp
;
2654 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2656 BT_ERR("PIN code is not 16 bytes long");
2658 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2660 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2661 MGMT_STATUS_INVALID_PARAMS
);
2666 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2672 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2673 reply
.pin_len
= cp
->pin_len
;
2674 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2676 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2678 mgmt_pending_remove(cmd
);
2681 hci_dev_unlock(hdev
);
2685 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2688 struct mgmt_cp_set_io_capability
*cp
= data
;
2694 hdev
->io_capability
= cp
->io_capability
;
2696 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
2697 hdev
->io_capability
);
2699 hci_dev_unlock(hdev
);
2701 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0, NULL
,
2705 static struct pending_cmd
*find_pairing(struct hci_conn
*conn
)
2707 struct hci_dev
*hdev
= conn
->hdev
;
2708 struct pending_cmd
*cmd
;
2710 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2711 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
2714 if (cmd
->user_data
!= conn
)
2723 static void pairing_complete(struct pending_cmd
*cmd
, u8 status
)
2725 struct mgmt_rp_pair_device rp
;
2726 struct hci_conn
*conn
= cmd
->user_data
;
2728 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2729 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
2731 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
, status
,
2734 /* So we don't get further callbacks for this connection */
2735 conn
->connect_cfm_cb
= NULL
;
2736 conn
->security_cfm_cb
= NULL
;
2737 conn
->disconn_cfm_cb
= NULL
;
2739 hci_conn_drop(conn
);
2741 mgmt_pending_remove(cmd
);
2744 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
2746 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
2747 struct pending_cmd
*cmd
;
2749 cmd
= find_pairing(conn
);
2751 pairing_complete(cmd
, status
);
2754 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2756 struct pending_cmd
*cmd
;
2758 BT_DBG("status %u", status
);
2760 cmd
= find_pairing(conn
);
2762 BT_DBG("Unable to find a pending command");
2764 pairing_complete(cmd
, mgmt_status(status
));
2767 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2769 struct pending_cmd
*cmd
;
2771 BT_DBG("status %u", status
);
2776 cmd
= find_pairing(conn
);
2778 BT_DBG("Unable to find a pending command");
2780 pairing_complete(cmd
, mgmt_status(status
));
2783 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2786 struct mgmt_cp_pair_device
*cp
= data
;
2787 struct mgmt_rp_pair_device rp
;
2788 struct pending_cmd
*cmd
;
2789 u8 sec_level
, auth_type
;
2790 struct hci_conn
*conn
;
2795 memset(&rp
, 0, sizeof(rp
));
2796 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2797 rp
.addr
.type
= cp
->addr
.type
;
2799 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2800 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2801 MGMT_STATUS_INVALID_PARAMS
,
2806 if (!hdev_is_powered(hdev
)) {
2807 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2808 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2812 sec_level
= BT_SECURITY_MEDIUM
;
2813 if (cp
->io_cap
== 0x03)
2814 auth_type
= HCI_AT_DEDICATED_BONDING
;
2816 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
2818 if (cp
->addr
.type
== BDADDR_BREDR
)
2819 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
2822 conn
= hci_connect_le(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
,
2823 sec_level
, auth_type
);
2828 if (PTR_ERR(conn
) == -EBUSY
)
2829 status
= MGMT_STATUS_BUSY
;
2831 status
= MGMT_STATUS_CONNECT_FAILED
;
2833 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2839 if (conn
->connect_cfm_cb
) {
2840 hci_conn_drop(conn
);
2841 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2842 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2846 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
2849 hci_conn_drop(conn
);
2853 /* For LE, just connecting isn't a proof that the pairing finished */
2854 if (cp
->addr
.type
== BDADDR_BREDR
) {
2855 conn
->connect_cfm_cb
= pairing_complete_cb
;
2856 conn
->security_cfm_cb
= pairing_complete_cb
;
2857 conn
->disconn_cfm_cb
= pairing_complete_cb
;
2859 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
2860 conn
->security_cfm_cb
= le_pairing_complete_cb
;
2861 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
2864 conn
->io_capability
= cp
->io_cap
;
2865 cmd
->user_data
= conn
;
2867 if (conn
->state
== BT_CONNECTED
&&
2868 hci_conn_security(conn
, sec_level
, auth_type
))
2869 pairing_complete(cmd
, 0);
2874 hci_dev_unlock(hdev
);
2878 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2881 struct mgmt_addr_info
*addr
= data
;
2882 struct pending_cmd
*cmd
;
2883 struct hci_conn
*conn
;
2890 if (!hdev_is_powered(hdev
)) {
2891 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2892 MGMT_STATUS_NOT_POWERED
);
2896 cmd
= mgmt_pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
2898 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2899 MGMT_STATUS_INVALID_PARAMS
);
2903 conn
= cmd
->user_data
;
2905 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
2906 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2907 MGMT_STATUS_INVALID_PARAMS
);
2911 pairing_complete(cmd
, MGMT_STATUS_CANCELLED
);
2913 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
2914 addr
, sizeof(*addr
));
2916 hci_dev_unlock(hdev
);
2920 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
2921 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
2922 u16 hci_op
, __le32 passkey
)
2924 struct pending_cmd
*cmd
;
2925 struct hci_conn
*conn
;
2930 if (!hdev_is_powered(hdev
)) {
2931 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2932 MGMT_STATUS_NOT_POWERED
, addr
,
2937 if (addr
->type
== BDADDR_BREDR
)
2938 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
2940 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
2943 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2944 MGMT_STATUS_NOT_CONNECTED
, addr
,
2949 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
2950 /* Continue with pairing via SMP */
2951 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
2954 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2955 MGMT_STATUS_SUCCESS
, addr
,
2958 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
2959 MGMT_STATUS_FAILED
, addr
,
2965 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
2971 /* Continue with pairing via HCI */
2972 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
2973 struct hci_cp_user_passkey_reply cp
;
2975 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
2976 cp
.passkey
= passkey
;
2977 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
2979 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
2983 mgmt_pending_remove(cmd
);
2986 hci_dev_unlock(hdev
);
2990 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2991 void *data
, u16 len
)
2993 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
2997 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2998 MGMT_OP_PIN_CODE_NEG_REPLY
,
2999 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3002 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3005 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3009 if (len
!= sizeof(*cp
))
3010 return cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3011 MGMT_STATUS_INVALID_PARAMS
);
3013 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3014 MGMT_OP_USER_CONFIRM_REPLY
,
3015 HCI_OP_USER_CONFIRM_REPLY
, 0);
3018 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3019 void *data
, u16 len
)
3021 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3025 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3026 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3027 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3030 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3033 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3037 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3038 MGMT_OP_USER_PASSKEY_REPLY
,
3039 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3042 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3043 void *data
, u16 len
)
3045 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3049 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3050 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3051 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3054 static void update_name(struct hci_request
*req
)
3056 struct hci_dev
*hdev
= req
->hdev
;
3057 struct hci_cp_write_local_name cp
;
3059 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3061 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3064 static void set_name_complete(struct hci_dev
*hdev
, u8 status
)
3066 struct mgmt_cp_set_local_name
*cp
;
3067 struct pending_cmd
*cmd
;
3069 BT_DBG("status 0x%02x", status
);
3073 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3080 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3081 mgmt_status(status
));
3083 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3086 mgmt_pending_remove(cmd
);
3089 hci_dev_unlock(hdev
);
3092 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3095 struct mgmt_cp_set_local_name
*cp
= data
;
3096 struct pending_cmd
*cmd
;
3097 struct hci_request req
;
3104 /* If the old values are the same as the new ones just return a
3105 * direct command complete event.
3107 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3108 !memcmp(hdev
->short_name
, cp
->short_name
,
3109 sizeof(hdev
->short_name
))) {
3110 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3115 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3117 if (!hdev_is_powered(hdev
)) {
3118 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3120 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3125 err
= mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
, len
,
3131 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3137 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3139 hci_req_init(&req
, hdev
);
3141 if (lmp_bredr_capable(hdev
)) {
3146 /* The name is stored in the scan response data and so
3147 * no need to udpate the advertising data here.
3149 if (lmp_le_capable(hdev
))
3150 update_scan_rsp_data(&req
);
3152 err
= hci_req_run(&req
, set_name_complete
);
3154 mgmt_pending_remove(cmd
);
3157 hci_dev_unlock(hdev
);
3161 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3162 void *data
, u16 data_len
)
3164 struct pending_cmd
*cmd
;
3167 BT_DBG("%s", hdev
->name
);
3171 if (!hdev_is_powered(hdev
)) {
3172 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3173 MGMT_STATUS_NOT_POWERED
);
3177 if (!lmp_ssp_capable(hdev
)) {
3178 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3179 MGMT_STATUS_NOT_SUPPORTED
);
3183 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3184 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3189 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3195 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
3196 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
,
3199 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3202 mgmt_pending_remove(cmd
);
3205 hci_dev_unlock(hdev
);
3209 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3210 void *data
, u16 len
)
3214 BT_DBG("%s ", hdev
->name
);
3218 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3219 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3222 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3223 cp
->hash
, cp
->randomizer
);
3225 status
= MGMT_STATUS_FAILED
;
3227 status
= MGMT_STATUS_SUCCESS
;
3229 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3230 status
, &cp
->addr
, sizeof(cp
->addr
));
3231 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3232 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3235 err
= hci_add_remote_oob_ext_data(hdev
, &cp
->addr
.bdaddr
,
3241 status
= MGMT_STATUS_FAILED
;
3243 status
= MGMT_STATUS_SUCCESS
;
3245 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3246 status
, &cp
->addr
, sizeof(cp
->addr
));
3248 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3249 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3250 MGMT_STATUS_INVALID_PARAMS
);
3253 hci_dev_unlock(hdev
);
3257 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3258 void *data
, u16 len
)
3260 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3264 BT_DBG("%s", hdev
->name
);
3268 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
);
3270 status
= MGMT_STATUS_INVALID_PARAMS
;
3272 status
= MGMT_STATUS_SUCCESS
;
3274 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3275 status
, &cp
->addr
, sizeof(cp
->addr
));
3277 hci_dev_unlock(hdev
);
3281 static int mgmt_start_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3283 struct pending_cmd
*cmd
;
3287 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3289 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3293 type
= hdev
->discovery
.type
;
3295 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3296 &type
, sizeof(type
));
3297 mgmt_pending_remove(cmd
);
3302 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3304 BT_DBG("status %d", status
);
3308 mgmt_start_discovery_failed(hdev
, status
);
3309 hci_dev_unlock(hdev
);
3314 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
3315 hci_dev_unlock(hdev
);
3317 switch (hdev
->discovery
.type
) {
3318 case DISCOV_TYPE_LE
:
3319 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
,
3323 case DISCOV_TYPE_INTERLEAVED
:
3324 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
,
3325 DISCOV_INTERLEAVED_TIMEOUT
);
3328 case DISCOV_TYPE_BREDR
:
3332 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
3336 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3337 void *data
, u16 len
)
3339 struct mgmt_cp_start_discovery
*cp
= data
;
3340 struct pending_cmd
*cmd
;
3341 struct hci_cp_le_set_scan_param param_cp
;
3342 struct hci_cp_le_set_scan_enable enable_cp
;
3343 struct hci_cp_inquiry inq_cp
;
3344 struct hci_request req
;
3345 /* General inquiry access code (GIAC) */
3346 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3347 u8 status
, own_addr_type
;
3350 BT_DBG("%s", hdev
->name
);
3354 if (!hdev_is_powered(hdev
)) {
3355 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3356 MGMT_STATUS_NOT_POWERED
);
3360 if (test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3361 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3366 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
) {
3367 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3372 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, NULL
, 0);
3378 hdev
->discovery
.type
= cp
->type
;
3380 hci_req_init(&req
, hdev
);
3382 switch (hdev
->discovery
.type
) {
3383 case DISCOV_TYPE_BREDR
:
3384 status
= mgmt_bredr_support(hdev
);
3386 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3388 mgmt_pending_remove(cmd
);
3392 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3393 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3395 mgmt_pending_remove(cmd
);
3399 hci_inquiry_cache_flush(hdev
);
3401 memset(&inq_cp
, 0, sizeof(inq_cp
));
3402 memcpy(&inq_cp
.lap
, lap
, sizeof(inq_cp
.lap
));
3403 inq_cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
3404 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(inq_cp
), &inq_cp
);
3407 case DISCOV_TYPE_LE
:
3408 case DISCOV_TYPE_INTERLEAVED
:
3409 status
= mgmt_le_support(hdev
);
3411 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3413 mgmt_pending_remove(cmd
);
3417 if (hdev
->discovery
.type
== DISCOV_TYPE_INTERLEAVED
&&
3418 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
3419 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3420 MGMT_STATUS_NOT_SUPPORTED
);
3421 mgmt_pending_remove(cmd
);
3425 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3426 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3427 MGMT_STATUS_REJECTED
);
3428 mgmt_pending_remove(cmd
);
3432 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
)) {
3433 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3435 mgmt_pending_remove(cmd
);
3439 memset(¶m_cp
, 0, sizeof(param_cp
));
3441 /* All active scans will be done with either a resolvable
3442 * private address (when privacy feature has been enabled)
3443 * or unresolvable private address.
3445 err
= hci_update_random_address(&req
, true, &own_addr_type
);
3447 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3448 MGMT_STATUS_FAILED
);
3449 mgmt_pending_remove(cmd
);
3453 param_cp
.type
= LE_SCAN_ACTIVE
;
3454 param_cp
.interval
= cpu_to_le16(DISCOV_LE_SCAN_INT
);
3455 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
3456 param_cp
.own_address_type
= own_addr_type
;
3457 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
3460 memset(&enable_cp
, 0, sizeof(enable_cp
));
3461 enable_cp
.enable
= LE_SCAN_ENABLE
;
3462 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3463 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
3468 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3469 MGMT_STATUS_INVALID_PARAMS
);
3470 mgmt_pending_remove(cmd
);
3474 err
= hci_req_run(&req
, start_discovery_complete
);
3476 mgmt_pending_remove(cmd
);
3478 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3481 hci_dev_unlock(hdev
);
3485 static int mgmt_stop_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3487 struct pending_cmd
*cmd
;
3490 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
3494 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3495 &hdev
->discovery
.type
, sizeof(hdev
->discovery
.type
));
3496 mgmt_pending_remove(cmd
);
3501 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3503 BT_DBG("status %d", status
);
3508 mgmt_stop_discovery_failed(hdev
, status
);
3512 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3515 hci_dev_unlock(hdev
);
3518 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3521 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
3522 struct pending_cmd
*cmd
;
3523 struct hci_cp_remote_name_req_cancel cp
;
3524 struct inquiry_entry
*e
;
3525 struct hci_request req
;
3528 BT_DBG("%s", hdev
->name
);
3532 if (!hci_discovery_active(hdev
)) {
3533 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3534 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
3535 sizeof(mgmt_cp
->type
));
3539 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
3540 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3541 MGMT_STATUS_INVALID_PARAMS
, &mgmt_cp
->type
,
3542 sizeof(mgmt_cp
->type
));
3546 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, NULL
, 0);
3552 hci_req_init(&req
, hdev
);
3554 switch (hdev
->discovery
.state
) {
3555 case DISCOVERY_FINDING
:
3556 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3557 hci_req_add(&req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
3559 cancel_delayed_work(&hdev
->le_scan_disable
);
3561 hci_req_add_le_scan_disable(&req
);
3566 case DISCOVERY_RESOLVING
:
3567 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
3570 mgmt_pending_remove(cmd
);
3571 err
= cmd_complete(sk
, hdev
->id
,
3572 MGMT_OP_STOP_DISCOVERY
, 0,
3574 sizeof(mgmt_cp
->type
));
3575 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3579 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
3580 hci_req_add(&req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
3586 BT_DBG("unknown discovery state %u", hdev
->discovery
.state
);
3588 mgmt_pending_remove(cmd
);
3589 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3590 MGMT_STATUS_FAILED
, &mgmt_cp
->type
,
3591 sizeof(mgmt_cp
->type
));
3595 err
= hci_req_run(&req
, stop_discovery_complete
);
3597 mgmt_pending_remove(cmd
);
3599 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
3602 hci_dev_unlock(hdev
);
3606 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3609 struct mgmt_cp_confirm_name
*cp
= data
;
3610 struct inquiry_entry
*e
;
3613 BT_DBG("%s", hdev
->name
);
3617 if (!hci_discovery_active(hdev
)) {
3618 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3619 MGMT_STATUS_FAILED
);
3623 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
3625 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3626 MGMT_STATUS_INVALID_PARAMS
);
3630 if (cp
->name_known
) {
3631 e
->name_state
= NAME_KNOWN
;
3634 e
->name_state
= NAME_NEEDED
;
3635 hci_inquiry_cache_update_resolve(hdev
, e
);
3638 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0, &cp
->addr
,
3642 hci_dev_unlock(hdev
);
3646 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3649 struct mgmt_cp_block_device
*cp
= data
;
3653 BT_DBG("%s", hdev
->name
);
3655 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3656 return cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
3657 MGMT_STATUS_INVALID_PARAMS
,
3658 &cp
->addr
, sizeof(cp
->addr
));
3662 err
= hci_blacklist_add(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3664 status
= MGMT_STATUS_FAILED
;
3666 status
= MGMT_STATUS_SUCCESS
;
3668 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
3669 &cp
->addr
, sizeof(cp
->addr
));
3671 hci_dev_unlock(hdev
);
3676 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3679 struct mgmt_cp_unblock_device
*cp
= data
;
3683 BT_DBG("%s", hdev
->name
);
3685 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3686 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
3687 MGMT_STATUS_INVALID_PARAMS
,
3688 &cp
->addr
, sizeof(cp
->addr
));
3692 err
= hci_blacklist_del(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3694 status
= MGMT_STATUS_INVALID_PARAMS
;
3696 status
= MGMT_STATUS_SUCCESS
;
3698 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
3699 &cp
->addr
, sizeof(cp
->addr
));
3701 hci_dev_unlock(hdev
);
3706 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3709 struct mgmt_cp_set_device_id
*cp
= data
;
3710 struct hci_request req
;
3714 BT_DBG("%s", hdev
->name
);
3716 source
= __le16_to_cpu(cp
->source
);
3718 if (source
> 0x0002)
3719 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
3720 MGMT_STATUS_INVALID_PARAMS
);
3724 hdev
->devid_source
= source
;
3725 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
3726 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
3727 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
3729 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0, NULL
, 0);
3731 hci_req_init(&req
, hdev
);
3733 hci_req_run(&req
, NULL
);
3735 hci_dev_unlock(hdev
);
3740 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
)
3742 struct cmd_lookup match
= { NULL
, hdev
};
3745 u8 mgmt_err
= mgmt_status(status
);
3747 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
3748 cmd_status_rsp
, &mgmt_err
);
3752 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
3755 new_settings(hdev
, match
.sk
);
3761 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3764 struct mgmt_mode
*cp
= data
;
3765 struct pending_cmd
*cmd
;
3766 struct hci_request req
;
3767 u8 val
, enabled
, status
;
3770 BT_DBG("request for %s", hdev
->name
);
3772 status
= mgmt_le_support(hdev
);
3774 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3777 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
3778 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3779 MGMT_STATUS_INVALID_PARAMS
);
3784 enabled
= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3786 /* The following conditions are ones which mean that we should
3787 * not do any HCI communication but directly send a mgmt
3788 * response to user space (after toggling the flag if
3791 if (!hdev_is_powered(hdev
) || val
== enabled
||
3792 hci_conn_num(hdev
, LE_LINK
) > 0) {
3793 bool changed
= false;
3795 if (val
!= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3796 change_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3800 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
3805 err
= new_settings(hdev
, sk
);
3810 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
3811 mgmt_pending_find(MGMT_OP_SET_LE
, hdev
)) {
3812 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3817 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
3823 hci_req_init(&req
, hdev
);
3826 enable_advertising(&req
);
3828 disable_advertising(&req
);
3830 err
= hci_req_run(&req
, set_advertising_complete
);
3832 mgmt_pending_remove(cmd
);
3835 hci_dev_unlock(hdev
);
3839 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
3840 void *data
, u16 len
)
3842 struct mgmt_cp_set_static_address
*cp
= data
;
3845 BT_DBG("%s", hdev
->name
);
3847 if (!lmp_le_capable(hdev
))
3848 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3849 MGMT_STATUS_NOT_SUPPORTED
);
3851 if (hdev_is_powered(hdev
))
3852 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3853 MGMT_STATUS_REJECTED
);
3855 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
3856 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
3857 return cmd_status(sk
, hdev
->id
,
3858 MGMT_OP_SET_STATIC_ADDRESS
,
3859 MGMT_STATUS_INVALID_PARAMS
);
3861 /* Two most significant bits shall be set */
3862 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
3863 return cmd_status(sk
, hdev
->id
,
3864 MGMT_OP_SET_STATIC_ADDRESS
,
3865 MGMT_STATUS_INVALID_PARAMS
);
3870 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
3872 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
, 0, NULL
, 0);
3874 hci_dev_unlock(hdev
);
3879 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
3880 void *data
, u16 len
)
3882 struct mgmt_cp_set_scan_params
*cp
= data
;
3883 __u16 interval
, window
;
3886 BT_DBG("%s", hdev
->name
);
3888 if (!lmp_le_capable(hdev
))
3889 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3890 MGMT_STATUS_NOT_SUPPORTED
);
3892 interval
= __le16_to_cpu(cp
->interval
);
3894 if (interval
< 0x0004 || interval
> 0x4000)
3895 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3896 MGMT_STATUS_INVALID_PARAMS
);
3898 window
= __le16_to_cpu(cp
->window
);
3900 if (window
< 0x0004 || window
> 0x4000)
3901 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3902 MGMT_STATUS_INVALID_PARAMS
);
3904 if (window
> interval
)
3905 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
3906 MGMT_STATUS_INVALID_PARAMS
);
3910 hdev
->le_scan_interval
= interval
;
3911 hdev
->le_scan_window
= window
;
3913 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0, NULL
, 0);
3915 hci_dev_unlock(hdev
);
3920 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
)
3922 struct pending_cmd
*cmd
;
3924 BT_DBG("status 0x%02x", status
);
3928 cmd
= mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
3933 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3934 mgmt_status(status
));
3936 struct mgmt_mode
*cp
= cmd
->param
;
3939 set_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
3941 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
3943 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
3944 new_settings(hdev
, cmd
->sk
);
3947 mgmt_pending_remove(cmd
);
3950 hci_dev_unlock(hdev
);
3953 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
3954 void *data
, u16 len
)
3956 struct mgmt_mode
*cp
= data
;
3957 struct pending_cmd
*cmd
;
3958 struct hci_request req
;
3961 BT_DBG("%s", hdev
->name
);
3963 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
) ||
3964 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
3965 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3966 MGMT_STATUS_NOT_SUPPORTED
);
3968 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
3969 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3970 MGMT_STATUS_INVALID_PARAMS
);
3972 if (!hdev_is_powered(hdev
))
3973 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3974 MGMT_STATUS_NOT_POWERED
);
3976 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
3977 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3978 MGMT_STATUS_REJECTED
);
3982 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
3983 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
3988 if (!!cp
->val
== test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
)) {
3989 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
3994 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4001 hci_req_init(&req
, hdev
);
4003 write_fast_connectable(&req
, cp
->val
);
4005 err
= hci_req_run(&req
, fast_connectable_complete
);
4007 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4008 MGMT_STATUS_FAILED
);
4009 mgmt_pending_remove(cmd
);
4013 hci_dev_unlock(hdev
);
4018 static void set_bredr_scan(struct hci_request
*req
)
4020 struct hci_dev
*hdev
= req
->hdev
;
4023 /* Ensure that fast connectable is disabled. This function will
4024 * not do anything if the page scan parameters are already what
4027 write_fast_connectable(req
, false);
4029 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4031 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
4032 scan
|= SCAN_INQUIRY
;
4035 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
4038 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
)
4040 struct pending_cmd
*cmd
;
4042 BT_DBG("status 0x%02x", status
);
4046 cmd
= mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
);
4051 u8 mgmt_err
= mgmt_status(status
);
4053 /* We need to restore the flag if related HCI commands
4056 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4058 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4060 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4061 new_settings(hdev
, cmd
->sk
);
4064 mgmt_pending_remove(cmd
);
4067 hci_dev_unlock(hdev
);
4070 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4072 struct mgmt_mode
*cp
= data
;
4073 struct pending_cmd
*cmd
;
4074 struct hci_request req
;
4077 BT_DBG("request for %s", hdev
->name
);
4079 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4080 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4081 MGMT_STATUS_NOT_SUPPORTED
);
4083 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
4084 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4085 MGMT_STATUS_REJECTED
);
4087 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4088 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4089 MGMT_STATUS_INVALID_PARAMS
);
4093 if (cp
->val
== test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4094 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4098 if (!hdev_is_powered(hdev
)) {
4100 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4101 clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
4102 clear_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4103 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4104 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
4107 change_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4109 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4113 err
= new_settings(hdev
, sk
);
4117 /* Reject disabling when powered on */
4119 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4120 MGMT_STATUS_REJECTED
);
4124 if (mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4125 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4130 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4136 /* We need to flip the bit already here so that update_adv_data
4137 * generates the correct flags.
4139 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4141 hci_req_init(&req
, hdev
);
4143 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4144 set_bredr_scan(&req
);
4146 /* Since only the advertising data flags will change, there
4147 * is no need to update the scan response data.
4149 update_adv_data(&req
);
4151 err
= hci_req_run(&req
, set_bredr_complete
);
4153 mgmt_pending_remove(cmd
);
4156 hci_dev_unlock(hdev
);
4160 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4161 void *data
, u16 len
)
4163 struct mgmt_mode
*cp
= data
;
4164 struct pending_cmd
*cmd
;
4168 BT_DBG("request for %s", hdev
->name
);
4170 status
= mgmt_bredr_support(hdev
);
4172 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4175 if (!lmp_sc_capable(hdev
) &&
4176 !test_bit(HCI_FORCE_SC
, &hdev
->dev_flags
))
4177 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4178 MGMT_STATUS_NOT_SUPPORTED
);
4180 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4181 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4182 MGMT_STATUS_INVALID_PARAMS
);
4186 if (!hdev_is_powered(hdev
)) {
4190 changed
= !test_and_set_bit(HCI_SC_ENABLED
,
4192 if (cp
->val
== 0x02)
4193 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4195 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4197 changed
= test_and_clear_bit(HCI_SC_ENABLED
,
4199 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4202 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4207 err
= new_settings(hdev
, sk
);
4212 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4213 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4220 if (val
== test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
4221 (cp
->val
== 0x02) == test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
)) {
4222 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4226 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4232 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4234 mgmt_pending_remove(cmd
);
4238 if (cp
->val
== 0x02)
4239 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4241 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4244 hci_dev_unlock(hdev
);
4248 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4249 void *data
, u16 len
)
4251 struct mgmt_mode
*cp
= data
;
4255 BT_DBG("request for %s", hdev
->name
);
4257 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4258 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
4259 MGMT_STATUS_INVALID_PARAMS
);
4264 changed
= !test_and_set_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
4266 changed
= test_and_clear_bit(HCI_DEBUG_KEYS
, &hdev
->dev_flags
);
4268 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
4273 err
= new_settings(hdev
, sk
);
4276 hci_dev_unlock(hdev
);
4280 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4283 struct mgmt_cp_set_privacy
*cp
= cp_data
;
4287 BT_DBG("request for %s", hdev
->name
);
4289 if (!lmp_le_capable(hdev
))
4290 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4291 MGMT_STATUS_NOT_SUPPORTED
);
4293 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
4294 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4295 MGMT_STATUS_INVALID_PARAMS
);
4297 if (hdev_is_powered(hdev
))
4298 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4299 MGMT_STATUS_REJECTED
);
4303 /* If user space supports this command it is also expected to
4304 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4306 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4309 changed
= !test_and_set_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4310 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
4311 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4313 changed
= test_and_clear_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4314 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
4315 clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4318 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
4323 err
= new_settings(hdev
, sk
);
4326 hci_dev_unlock(hdev
);
4330 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
4332 switch (irk
->addr
.type
) {
4333 case BDADDR_LE_PUBLIC
:
4336 case BDADDR_LE_RANDOM
:
4337 /* Two most significant bits shall be set */
4338 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4346 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4349 struct mgmt_cp_load_irks
*cp
= cp_data
;
4350 u16 irk_count
, expected_len
;
4353 BT_DBG("request for %s", hdev
->name
);
4355 if (!lmp_le_capable(hdev
))
4356 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4357 MGMT_STATUS_NOT_SUPPORTED
);
4359 irk_count
= __le16_to_cpu(cp
->irk_count
);
4361 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4362 if (expected_len
!= len
) {
4363 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4365 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4366 MGMT_STATUS_INVALID_PARAMS
);
4369 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4371 for (i
= 0; i
< irk_count
; i
++) {
4372 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4374 if (!irk_is_valid(key
))
4375 return cmd_status(sk
, hdev
->id
,
4377 MGMT_STATUS_INVALID_PARAMS
);
4382 hci_smp_irks_clear(hdev
);
4384 for (i
= 0; i
< irk_count
; i
++) {
4385 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4388 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
4389 addr_type
= ADDR_LE_DEV_PUBLIC
;
4391 addr_type
= ADDR_LE_DEV_RANDOM
;
4393 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
4397 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4399 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4401 hci_dev_unlock(hdev
);
4406 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
4408 if (key
->master
!= 0x00 && key
->master
!= 0x01)
4411 switch (key
->addr
.type
) {
4412 case BDADDR_LE_PUBLIC
:
4415 case BDADDR_LE_RANDOM
:
4416 /* Two most significant bits shall be set */
4417 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4425 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4426 void *cp_data
, u16 len
)
4428 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
4429 u16 key_count
, expected_len
;
4432 BT_DBG("request for %s", hdev
->name
);
4434 if (!lmp_le_capable(hdev
))
4435 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4436 MGMT_STATUS_NOT_SUPPORTED
);
4438 key_count
= __le16_to_cpu(cp
->key_count
);
4440 expected_len
= sizeof(*cp
) + key_count
*
4441 sizeof(struct mgmt_ltk_info
);
4442 if (expected_len
!= len
) {
4443 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4445 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4446 MGMT_STATUS_INVALID_PARAMS
);
4449 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
4451 for (i
= 0; i
< key_count
; i
++) {
4452 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4454 if (!ltk_is_valid(key
))
4455 return cmd_status(sk
, hdev
->id
,
4456 MGMT_OP_LOAD_LONG_TERM_KEYS
,
4457 MGMT_STATUS_INVALID_PARAMS
);
4462 hci_smp_ltks_clear(hdev
);
4464 for (i
= 0; i
< key_count
; i
++) {
4465 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4468 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
4469 addr_type
= ADDR_LE_DEV_PUBLIC
;
4471 addr_type
= ADDR_LE_DEV_RANDOM
;
4476 type
= HCI_SMP_LTK_SLAVE
;
4478 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
4479 key
->type
, key
->val
, key
->enc_size
, key
->ediv
,
4483 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
4486 hci_dev_unlock(hdev
);
4491 static const struct mgmt_handler
{
4492 int (*func
) (struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4496 } mgmt_handlers
[] = {
4497 { NULL
}, /* 0x0000 (no command) */
4498 { read_version
, false, MGMT_READ_VERSION_SIZE
},
4499 { read_commands
, false, MGMT_READ_COMMANDS_SIZE
},
4500 { read_index_list
, false, MGMT_READ_INDEX_LIST_SIZE
},
4501 { read_controller_info
, false, MGMT_READ_INFO_SIZE
},
4502 { set_powered
, false, MGMT_SETTING_SIZE
},
4503 { set_discoverable
, false, MGMT_SET_DISCOVERABLE_SIZE
},
4504 { set_connectable
, false, MGMT_SETTING_SIZE
},
4505 { set_fast_connectable
, false, MGMT_SETTING_SIZE
},
4506 { set_pairable
, false, MGMT_SETTING_SIZE
},
4507 { set_link_security
, false, MGMT_SETTING_SIZE
},
4508 { set_ssp
, false, MGMT_SETTING_SIZE
},
4509 { set_hs
, false, MGMT_SETTING_SIZE
},
4510 { set_le
, false, MGMT_SETTING_SIZE
},
4511 { set_dev_class
, false, MGMT_SET_DEV_CLASS_SIZE
},
4512 { set_local_name
, false, MGMT_SET_LOCAL_NAME_SIZE
},
4513 { add_uuid
, false, MGMT_ADD_UUID_SIZE
},
4514 { remove_uuid
, false, MGMT_REMOVE_UUID_SIZE
},
4515 { load_link_keys
, true, MGMT_LOAD_LINK_KEYS_SIZE
},
4516 { load_long_term_keys
, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE
},
4517 { disconnect
, false, MGMT_DISCONNECT_SIZE
},
4518 { get_connections
, false, MGMT_GET_CONNECTIONS_SIZE
},
4519 { pin_code_reply
, false, MGMT_PIN_CODE_REPLY_SIZE
},
4520 { pin_code_neg_reply
, false, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
4521 { set_io_capability
, false, MGMT_SET_IO_CAPABILITY_SIZE
},
4522 { pair_device
, false, MGMT_PAIR_DEVICE_SIZE
},
4523 { cancel_pair_device
, false, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
4524 { unpair_device
, false, MGMT_UNPAIR_DEVICE_SIZE
},
4525 { user_confirm_reply
, false, MGMT_USER_CONFIRM_REPLY_SIZE
},
4526 { user_confirm_neg_reply
, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
4527 { user_passkey_reply
, false, MGMT_USER_PASSKEY_REPLY_SIZE
},
4528 { user_passkey_neg_reply
, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
4529 { read_local_oob_data
, false, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
4530 { add_remote_oob_data
, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE
},
4531 { remove_remote_oob_data
, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
4532 { start_discovery
, false, MGMT_START_DISCOVERY_SIZE
},
4533 { stop_discovery
, false, MGMT_STOP_DISCOVERY_SIZE
},
4534 { confirm_name
, false, MGMT_CONFIRM_NAME_SIZE
},
4535 { block_device
, false, MGMT_BLOCK_DEVICE_SIZE
},
4536 { unblock_device
, false, MGMT_UNBLOCK_DEVICE_SIZE
},
4537 { set_device_id
, false, MGMT_SET_DEVICE_ID_SIZE
},
4538 { set_advertising
, false, MGMT_SETTING_SIZE
},
4539 { set_bredr
, false, MGMT_SETTING_SIZE
},
4540 { set_static_address
, false, MGMT_SET_STATIC_ADDRESS_SIZE
},
4541 { set_scan_params
, false, MGMT_SET_SCAN_PARAMS_SIZE
},
4542 { set_secure_conn
, false, MGMT_SETTING_SIZE
},
4543 { set_debug_keys
, false, MGMT_SETTING_SIZE
},
4544 { set_privacy
, false, MGMT_SET_PRIVACY_SIZE
},
4545 { load_irks
, true, MGMT_LOAD_IRKS_SIZE
},
4549 int mgmt_control(struct sock
*sk
, struct msghdr
*msg
, size_t msglen
)
4553 struct mgmt_hdr
*hdr
;
4554 u16 opcode
, index
, len
;
4555 struct hci_dev
*hdev
= NULL
;
4556 const struct mgmt_handler
*handler
;
4559 BT_DBG("got %zu bytes", msglen
);
4561 if (msglen
< sizeof(*hdr
))
4564 buf
= kmalloc(msglen
, GFP_KERNEL
);
4568 if (memcpy_fromiovec(buf
, msg
->msg_iov
, msglen
)) {
4574 opcode
= __le16_to_cpu(hdr
->opcode
);
4575 index
= __le16_to_cpu(hdr
->index
);
4576 len
= __le16_to_cpu(hdr
->len
);
4578 if (len
!= msglen
- sizeof(*hdr
)) {
4583 if (index
!= MGMT_INDEX_NONE
) {
4584 hdev
= hci_dev_get(index
);
4586 err
= cmd_status(sk
, index
, opcode
,
4587 MGMT_STATUS_INVALID_INDEX
);
4591 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
4592 test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
4593 err
= cmd_status(sk
, index
, opcode
,
4594 MGMT_STATUS_INVALID_INDEX
);
4599 if (opcode
>= ARRAY_SIZE(mgmt_handlers
) ||
4600 mgmt_handlers
[opcode
].func
== NULL
) {
4601 BT_DBG("Unknown op %u", opcode
);
4602 err
= cmd_status(sk
, index
, opcode
,
4603 MGMT_STATUS_UNKNOWN_COMMAND
);
4607 if ((hdev
&& opcode
< MGMT_OP_READ_INFO
) ||
4608 (!hdev
&& opcode
>= MGMT_OP_READ_INFO
)) {
4609 err
= cmd_status(sk
, index
, opcode
,
4610 MGMT_STATUS_INVALID_INDEX
);
4614 handler
= &mgmt_handlers
[opcode
];
4616 if ((handler
->var_len
&& len
< handler
->data_len
) ||
4617 (!handler
->var_len
&& len
!= handler
->data_len
)) {
4618 err
= cmd_status(sk
, index
, opcode
,
4619 MGMT_STATUS_INVALID_PARAMS
);
4624 mgmt_init_hdev(sk
, hdev
);
4626 cp
= buf
+ sizeof(*hdr
);
4628 err
= handler
->func(sk
, hdev
, cp
, len
);
4642 void mgmt_index_added(struct hci_dev
*hdev
)
4644 if (hdev
->dev_type
!= HCI_BREDR
)
4647 mgmt_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
4650 void mgmt_index_removed(struct hci_dev
*hdev
)
4652 u8 status
= MGMT_STATUS_INVALID_INDEX
;
4654 if (hdev
->dev_type
!= HCI_BREDR
)
4657 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status
);
4659 mgmt_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
4662 static void powered_complete(struct hci_dev
*hdev
, u8 status
)
4664 struct cmd_lookup match
= { NULL
, hdev
};
4666 BT_DBG("status 0x%02x", status
);
4670 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
4672 new_settings(hdev
, match
.sk
);
4674 hci_dev_unlock(hdev
);
4680 static int powered_update_hci(struct hci_dev
*hdev
)
4682 struct hci_request req
;
4685 hci_req_init(&req
, hdev
);
4687 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
) &&
4688 !lmp_host_ssp_capable(hdev
)) {
4691 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, 1, &ssp
);
4694 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
4695 lmp_bredr_capable(hdev
)) {
4696 struct hci_cp_write_le_host_supported cp
;
4699 cp
.simul
= lmp_le_br_capable(hdev
);
4701 /* Check first if we already have the right
4702 * host state (host features set)
4704 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
4705 cp
.simul
!= lmp_host_le_br_capable(hdev
))
4706 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
4710 if (lmp_le_capable(hdev
)) {
4711 /* Make sure the controller has a good default for
4712 * advertising data. This also applies to the case
4713 * where BR/EDR was toggled during the AUTO_OFF phase.
4715 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
4716 update_adv_data(&req
);
4717 update_scan_rsp_data(&req
);
4720 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
4721 enable_advertising(&req
);
4724 link_sec
= test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4725 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
4726 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
4727 sizeof(link_sec
), &link_sec
);
4729 if (lmp_bredr_capable(hdev
)) {
4730 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
4731 set_bredr_scan(&req
);
4737 return hci_req_run(&req
, powered_complete
);
4740 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
4742 struct cmd_lookup match
= { NULL
, hdev
};
4743 u8 status_not_powered
= MGMT_STATUS_NOT_POWERED
;
4744 u8 zero_cod
[] = { 0, 0, 0 };
4747 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
4751 if (powered_update_hci(hdev
) == 0)
4754 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
4759 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
4760 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status_not_powered
);
4762 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
4763 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
4764 zero_cod
, sizeof(zero_cod
), NULL
);
4767 err
= new_settings(hdev
, match
.sk
);
4775 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
4777 struct pending_cmd
*cmd
;
4780 cmd
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
4784 if (err
== -ERFKILL
)
4785 status
= MGMT_STATUS_RFKILLED
;
4787 status
= MGMT_STATUS_FAILED
;
4789 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
4791 mgmt_pending_remove(cmd
);
4794 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
4796 struct hci_request req
;
4800 /* When discoverable timeout triggers, then just make sure
4801 * the limited discoverable flag is cleared. Even in the case
4802 * of a timeout triggered from general discoverable, it is
4803 * safe to unconditionally clear the flag.
4805 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
4806 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4808 hci_req_init(&req
, hdev
);
4809 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4810 u8 scan
= SCAN_PAGE
;
4811 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
4812 sizeof(scan
), &scan
);
4815 update_adv_data(&req
);
4816 hci_req_run(&req
, NULL
);
4818 hdev
->discov_timeout
= 0;
4820 new_settings(hdev
, NULL
);
4822 hci_dev_unlock(hdev
);
4825 void mgmt_discoverable(struct hci_dev
*hdev
, u8 discoverable
)
4829 /* Nothing needed here if there's a pending command since that
4830 * commands request completion callback takes care of everything
4833 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
))
4836 /* Powering off may clear the scan mode - don't let that interfere */
4837 if (!discoverable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
4841 changed
= !test_and_set_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4843 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
4844 changed
= test_and_clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4848 struct hci_request req
;
4850 /* In case this change in discoverable was triggered by
4851 * a disabling of connectable there could be a need to
4852 * update the advertising flags.
4854 hci_req_init(&req
, hdev
);
4855 update_adv_data(&req
);
4856 hci_req_run(&req
, NULL
);
4858 new_settings(hdev
, NULL
);
4862 void mgmt_connectable(struct hci_dev
*hdev
, u8 connectable
)
4866 /* Nothing needed here if there's a pending command since that
4867 * commands request completion callback takes care of everything
4870 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
))
4873 /* Powering off may clear the scan mode - don't let that interfere */
4874 if (!connectable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
4878 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
4880 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
4883 new_settings(hdev
, NULL
);
4886 void mgmt_advertising(struct hci_dev
*hdev
, u8 advertising
)
4888 /* Powering off may stop advertising - don't let that interfere */
4889 if (!advertising
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
4893 set_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4895 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4898 void mgmt_write_scan_failed(struct hci_dev
*hdev
, u8 scan
, u8 status
)
4900 u8 mgmt_err
= mgmt_status(status
);
4902 if (scan
& SCAN_PAGE
)
4903 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE
, hdev
,
4904 cmd_status_rsp
, &mgmt_err
);
4906 if (scan
& SCAN_INQUIRY
)
4907 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE
, hdev
,
4908 cmd_status_rsp
, &mgmt_err
);
4911 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
4914 struct mgmt_ev_new_link_key ev
;
4916 memset(&ev
, 0, sizeof(ev
));
4918 ev
.store_hint
= persistent
;
4919 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
4920 ev
.key
.addr
.type
= BDADDR_BREDR
;
4921 ev
.key
.type
= key
->type
;
4922 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
4923 ev
.key
.pin_len
= key
->pin_len
;
4925 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
4928 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
)
4930 struct mgmt_ev_new_long_term_key ev
;
4932 memset(&ev
, 0, sizeof(ev
));
4934 /* Devices using resolvable or non-resolvable random addresses
4935 * without providing an indentity resolving key don't require
4936 * to store long term keys. Their addresses will change the
4939 * Only when a remote device provides an identity address
4940 * make sure the long term key is stored. If the remote
4941 * identity is known, the long term keys are internally
4942 * mapped to the identity address. So allow static random
4943 * and public addresses here.
4945 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
4946 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4947 ev
.store_hint
= 0x00;
4949 ev
.store_hint
= 0x01;
4951 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
4952 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
4953 ev
.key
.type
= key
->authenticated
;
4954 ev
.key
.enc_size
= key
->enc_size
;
4955 ev
.key
.ediv
= key
->ediv
;
4957 if (key
->type
== HCI_SMP_LTK
)
4960 memcpy(ev
.key
.rand
, key
->rand
, sizeof(key
->rand
));
4961 memcpy(ev
.key
.val
, key
->val
, sizeof(key
->val
));
4963 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
4966 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
)
4968 struct mgmt_ev_new_irk ev
;
4970 memset(&ev
, 0, sizeof(ev
));
4972 /* For identity resolving keys from devices that are already
4973 * using a public address or static random address, do not
4974 * ask for storing this key. The identity resolving key really
4975 * is only mandatory for devices using resovlable random
4978 * Storing all identity resolving keys has the downside that
4979 * they will be also loaded on next boot of they system. More
4980 * identity resolving keys, means more time during scanning is
4981 * needed to actually resolve these addresses.
4983 if (bacmp(&irk
->rpa
, BDADDR_ANY
))
4984 ev
.store_hint
= 0x01;
4986 ev
.store_hint
= 0x00;
4988 bacpy(&ev
.rpa
, &irk
->rpa
);
4989 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
4990 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
4991 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
4993 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
4996 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
4999 eir
[eir_len
++] = sizeof(type
) + data_len
;
5000 eir
[eir_len
++] = type
;
5001 memcpy(&eir
[eir_len
], data
, data_len
);
5002 eir_len
+= data_len
;
5007 void mgmt_device_connected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5008 u8 addr_type
, u32 flags
, u8
*name
, u8 name_len
,
5012 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
5015 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
5016 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5018 ev
->flags
= __cpu_to_le32(flags
);
5021 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
5024 if (dev_class
&& memcmp(dev_class
, "\0\0\0", 3) != 0)
5025 eir_len
= eir_append_data(ev
->eir
, eir_len
,
5026 EIR_CLASS_OF_DEV
, dev_class
, 3);
5028 ev
->eir_len
= cpu_to_le16(eir_len
);
5030 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
5031 sizeof(*ev
) + eir_len
, NULL
);
5034 static void disconnect_rsp(struct pending_cmd
*cmd
, void *data
)
5036 struct mgmt_cp_disconnect
*cp
= cmd
->param
;
5037 struct sock
**sk
= data
;
5038 struct mgmt_rp_disconnect rp
;
5040 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5041 rp
.addr
.type
= cp
->addr
.type
;
5043 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
, 0, &rp
,
5049 mgmt_pending_remove(cmd
);
5052 static void unpair_device_rsp(struct pending_cmd
*cmd
, void *data
)
5054 struct hci_dev
*hdev
= data
;
5055 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
5056 struct mgmt_rp_unpair_device rp
;
5058 memset(&rp
, 0, sizeof(rp
));
5059 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5060 rp
.addr
.type
= cp
->addr
.type
;
5062 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
5064 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, 0, &rp
, sizeof(rp
));
5066 mgmt_pending_remove(cmd
);
5069 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5070 u8 link_type
, u8 addr_type
, u8 reason
,
5071 bool mgmt_connected
)
5073 struct mgmt_ev_device_disconnected ev
;
5074 struct pending_cmd
*power_off
;
5075 struct sock
*sk
= NULL
;
5077 power_off
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
5079 struct mgmt_mode
*cp
= power_off
->param
;
5081 /* The connection is still in hci_conn_hash so test for 1
5082 * instead of 0 to know if this is the last one.
5084 if (!cp
->val
&& hci_conn_count(hdev
) == 1)
5085 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
5088 if (!mgmt_connected
)
5091 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
5094 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
5096 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5097 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5100 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
5105 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
5109 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5110 u8 link_type
, u8 addr_type
, u8 status
)
5112 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
5113 struct mgmt_cp_disconnect
*cp
;
5114 struct mgmt_rp_disconnect rp
;
5115 struct pending_cmd
*cmd
;
5117 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
5120 cmd
= mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
);
5126 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
5129 if (cp
->addr
.type
!= bdaddr_type
)
5132 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5133 rp
.addr
.type
= bdaddr_type
;
5135 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
,
5136 mgmt_status(status
), &rp
, sizeof(rp
));
5138 mgmt_pending_remove(cmd
);
5141 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5142 u8 addr_type
, u8 status
)
5144 struct mgmt_ev_connect_failed ev
;
5146 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5147 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5148 ev
.status
= mgmt_status(status
);
5150 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
5153 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
5155 struct mgmt_ev_pin_code_request ev
;
5157 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5158 ev
.addr
.type
= BDADDR_BREDR
;
5161 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
5164 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5167 struct pending_cmd
*cmd
;
5168 struct mgmt_rp_pin_code_reply rp
;
5170 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
5174 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5175 rp
.addr
.type
= BDADDR_BREDR
;
5177 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
5178 mgmt_status(status
), &rp
, sizeof(rp
));
5180 mgmt_pending_remove(cmd
);
5183 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5186 struct pending_cmd
*cmd
;
5187 struct mgmt_rp_pin_code_reply rp
;
5189 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
5193 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5194 rp
.addr
.type
= BDADDR_BREDR
;
5196 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_NEG_REPLY
,
5197 mgmt_status(status
), &rp
, sizeof(rp
));
5199 mgmt_pending_remove(cmd
);
5202 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5203 u8 link_type
, u8 addr_type
, __le32 value
,
5206 struct mgmt_ev_user_confirm_request ev
;
5208 BT_DBG("%s", hdev
->name
);
5210 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5211 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5212 ev
.confirm_hint
= confirm_hint
;
5215 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
5219 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5220 u8 link_type
, u8 addr_type
)
5222 struct mgmt_ev_user_passkey_request ev
;
5224 BT_DBG("%s", hdev
->name
);
5226 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5227 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5229 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
5233 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5234 u8 link_type
, u8 addr_type
, u8 status
,
5237 struct pending_cmd
*cmd
;
5238 struct mgmt_rp_user_confirm_reply rp
;
5241 cmd
= mgmt_pending_find(opcode
, hdev
);
5245 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
5246 rp
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5247 err
= cmd_complete(cmd
->sk
, hdev
->id
, opcode
, mgmt_status(status
),
5250 mgmt_pending_remove(cmd
);
5255 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5256 u8 link_type
, u8 addr_type
, u8 status
)
5258 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5259 status
, MGMT_OP_USER_CONFIRM_REPLY
);
5262 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5263 u8 link_type
, u8 addr_type
, u8 status
)
5265 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5267 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
5270 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5271 u8 link_type
, u8 addr_type
, u8 status
)
5273 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5274 status
, MGMT_OP_USER_PASSKEY_REPLY
);
5277 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5278 u8 link_type
, u8 addr_type
, u8 status
)
5280 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
5282 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
5285 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
5286 u8 link_type
, u8 addr_type
, u32 passkey
,
5289 struct mgmt_ev_passkey_notify ev
;
5291 BT_DBG("%s", hdev
->name
);
5293 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5294 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5295 ev
.passkey
= __cpu_to_le32(passkey
);
5296 ev
.entered
= entered
;
5298 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
5301 void mgmt_auth_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5302 u8 addr_type
, u8 status
)
5304 struct mgmt_ev_auth_failed ev
;
5306 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5307 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5308 ev
.status
= mgmt_status(status
);
5310 mgmt_event(MGMT_EV_AUTH_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
5313 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
5315 struct cmd_lookup match
= { NULL
, hdev
};
5319 u8 mgmt_err
= mgmt_status(status
);
5320 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
5321 cmd_status_rsp
, &mgmt_err
);
5325 if (test_bit(HCI_AUTH
, &hdev
->flags
))
5326 changed
= !test_and_set_bit(HCI_LINK_SECURITY
,
5329 changed
= test_and_clear_bit(HCI_LINK_SECURITY
,
5332 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
5336 new_settings(hdev
, match
.sk
);
5342 static void clear_eir(struct hci_request
*req
)
5344 struct hci_dev
*hdev
= req
->hdev
;
5345 struct hci_cp_write_eir cp
;
5347 if (!lmp_ext_inq_capable(hdev
))
5350 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
5352 memset(&cp
, 0, sizeof(cp
));
5354 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
5357 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
5359 struct cmd_lookup match
= { NULL
, hdev
};
5360 struct hci_request req
;
5361 bool changed
= false;
5364 u8 mgmt_err
= mgmt_status(status
);
5366 if (enable
&& test_and_clear_bit(HCI_SSP_ENABLED
,
5367 &hdev
->dev_flags
)) {
5368 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
5369 new_settings(hdev
, NULL
);
5372 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
5378 changed
= !test_and_set_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
5380 changed
= test_and_clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
5382 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
5385 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
5388 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
5391 new_settings(hdev
, match
.sk
);
5396 hci_req_init(&req
, hdev
);
5398 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
5403 hci_req_run(&req
, NULL
);
5406 void mgmt_sc_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
5408 struct cmd_lookup match
= { NULL
, hdev
};
5409 bool changed
= false;
5412 u8 mgmt_err
= mgmt_status(status
);
5415 if (test_and_clear_bit(HCI_SC_ENABLED
,
5417 new_settings(hdev
, NULL
);
5418 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
5421 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
5422 cmd_status_rsp
, &mgmt_err
);
5427 changed
= !test_and_set_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
5429 changed
= test_and_clear_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
5430 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
5433 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
5434 settings_rsp
, &match
);
5437 new_settings(hdev
, match
.sk
);
5443 static void sk_lookup(struct pending_cmd
*cmd
, void *data
)
5445 struct cmd_lookup
*match
= data
;
5447 if (match
->sk
== NULL
) {
5448 match
->sk
= cmd
->sk
;
5449 sock_hold(match
->sk
);
5453 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
5456 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
5458 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
5459 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
5460 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
5463 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
, 3,
5470 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
5472 struct mgmt_cp_set_local_name ev
;
5473 struct pending_cmd
*cmd
;
5478 memset(&ev
, 0, sizeof(ev
));
5479 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
5480 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
5482 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
5484 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
5486 /* If this is a HCI command related to powering on the
5487 * HCI dev don't send any mgmt signals.
5489 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5493 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
5494 cmd
? cmd
->sk
: NULL
);
5497 void mgmt_read_local_oob_data_complete(struct hci_dev
*hdev
, u8
*hash192
,
5498 u8
*randomizer192
, u8
*hash256
,
5499 u8
*randomizer256
, u8 status
)
5501 struct pending_cmd
*cmd
;
5503 BT_DBG("%s status %u", hdev
->name
, status
);
5505 cmd
= mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
5510 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
5511 mgmt_status(status
));
5513 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
5514 hash256
&& randomizer256
) {
5515 struct mgmt_rp_read_local_oob_ext_data rp
;
5517 memcpy(rp
.hash192
, hash192
, sizeof(rp
.hash192
));
5518 memcpy(rp
.randomizer192
, randomizer192
,
5519 sizeof(rp
.randomizer192
));
5521 memcpy(rp
.hash256
, hash256
, sizeof(rp
.hash256
));
5522 memcpy(rp
.randomizer256
, randomizer256
,
5523 sizeof(rp
.randomizer256
));
5525 cmd_complete(cmd
->sk
, hdev
->id
,
5526 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
5529 struct mgmt_rp_read_local_oob_data rp
;
5531 memcpy(rp
.hash
, hash192
, sizeof(rp
.hash
));
5532 memcpy(rp
.randomizer
, randomizer192
,
5533 sizeof(rp
.randomizer
));
5535 cmd_complete(cmd
->sk
, hdev
->id
,
5536 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
5541 mgmt_pending_remove(cmd
);
5544 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5545 u8 addr_type
, u8
*dev_class
, s8 rssi
, u8 cfm_name
, u8
5546 ssp
, u8
*eir
, u16 eir_len
)
5549 struct mgmt_ev_device_found
*ev
= (void *) buf
;
5550 struct smp_irk
*irk
;
5553 if (!hci_discovery_active(hdev
))
5556 /* Leave 5 bytes for a potential CoD field */
5557 if (sizeof(*ev
) + eir_len
+ 5 > sizeof(buf
))
5560 memset(buf
, 0, sizeof(buf
));
5562 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
5564 bacpy(&ev
->addr
.bdaddr
, &irk
->bdaddr
);
5565 ev
->addr
.type
= link_to_bdaddr(link_type
, irk
->addr_type
);
5567 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
5568 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5573 ev
->flags
|= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME
);
5575 ev
->flags
|= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING
);
5578 memcpy(ev
->eir
, eir
, eir_len
);
5580 if (dev_class
&& !eir_has_data_type(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
))
5581 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
5584 ev
->eir_len
= cpu_to_le16(eir_len
);
5585 ev_size
= sizeof(*ev
) + eir_len
;
5587 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
5590 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
5591 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
5593 struct mgmt_ev_device_found
*ev
;
5594 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
5597 ev
= (struct mgmt_ev_device_found
*) buf
;
5599 memset(buf
, 0, sizeof(buf
));
5601 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
5602 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
5605 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
5608 ev
->eir_len
= cpu_to_le16(eir_len
);
5610 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
5613 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
5615 struct mgmt_ev_discovering ev
;
5616 struct pending_cmd
*cmd
;
5618 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
5621 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
5623 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
5626 u8 type
= hdev
->discovery
.type
;
5628 cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, 0, &type
,
5630 mgmt_pending_remove(cmd
);
5633 memset(&ev
, 0, sizeof(ev
));
5634 ev
.type
= hdev
->discovery
.type
;
5635 ev
.discovering
= discovering
;
5637 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
5640 int mgmt_device_blocked(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
5642 struct pending_cmd
*cmd
;
5643 struct mgmt_ev_device_blocked ev
;
5645 cmd
= mgmt_pending_find(MGMT_OP_BLOCK_DEVICE
, hdev
);
5647 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5648 ev
.addr
.type
= type
;
5650 return mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &ev
, sizeof(ev
),
5651 cmd
? cmd
->sk
: NULL
);
5654 int mgmt_device_unblocked(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
5656 struct pending_cmd
*cmd
;
5657 struct mgmt_ev_device_unblocked ev
;
5659 cmd
= mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE
, hdev
);
5661 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5662 ev
.addr
.type
= type
;
5664 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &ev
, sizeof(ev
),
5665 cmd
? cmd
->sk
: NULL
);
5668 static void adv_enable_complete(struct hci_dev
*hdev
, u8 status
)
5670 BT_DBG("%s status %u", hdev
->name
, status
);
5672 /* Clear the advertising mgmt setting if we failed to re-enable it */
5674 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5675 new_settings(hdev
, NULL
);
5679 void mgmt_reenable_advertising(struct hci_dev
*hdev
)
5681 struct hci_request req
;
5683 if (hci_conn_num(hdev
, LE_LINK
) > 0)
5686 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
5689 hci_req_init(&req
, hdev
);
5690 enable_advertising(&req
);
5692 /* If this fails we have no option but to let user space know
5693 * that we've disabled advertising.
5695 if (hci_req_run(&req
, adv_enable_complete
) < 0) {
5696 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5697 new_settings(hdev
, NULL
);