2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
40 static const u16 mgmt_commands
[] = {
41 MGMT_OP_READ_INDEX_LIST
,
44 MGMT_OP_SET_DISCOVERABLE
,
45 MGMT_OP_SET_CONNECTABLE
,
46 MGMT_OP_SET_FAST_CONNECTABLE
,
48 MGMT_OP_SET_LINK_SECURITY
,
52 MGMT_OP_SET_DEV_CLASS
,
53 MGMT_OP_SET_LOCAL_NAME
,
56 MGMT_OP_LOAD_LINK_KEYS
,
57 MGMT_OP_LOAD_LONG_TERM_KEYS
,
59 MGMT_OP_GET_CONNECTIONS
,
60 MGMT_OP_PIN_CODE_REPLY
,
61 MGMT_OP_PIN_CODE_NEG_REPLY
,
62 MGMT_OP_SET_IO_CAPABILITY
,
64 MGMT_OP_CANCEL_PAIR_DEVICE
,
65 MGMT_OP_UNPAIR_DEVICE
,
66 MGMT_OP_USER_CONFIRM_REPLY
,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
68 MGMT_OP_USER_PASSKEY_REPLY
,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
70 MGMT_OP_READ_LOCAL_OOB_DATA
,
71 MGMT_OP_ADD_REMOTE_OOB_DATA
,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
73 MGMT_OP_START_DISCOVERY
,
74 MGMT_OP_STOP_DISCOVERY
,
77 MGMT_OP_UNBLOCK_DEVICE
,
78 MGMT_OP_SET_DEVICE_ID
,
79 MGMT_OP_SET_ADVERTISING
,
81 MGMT_OP_SET_STATIC_ADDRESS
,
82 MGMT_OP_SET_SCAN_PARAMS
,
83 MGMT_OP_SET_SECURE_CONN
,
84 MGMT_OP_SET_DEBUG_KEYS
,
87 MGMT_OP_GET_CONN_INFO
,
88 MGMT_OP_GET_CLOCK_INFO
,
90 MGMT_OP_REMOVE_DEVICE
,
91 MGMT_OP_LOAD_CONN_PARAM
,
94 static const u16 mgmt_events
[] = {
95 MGMT_EV_CONTROLLER_ERROR
,
97 MGMT_EV_INDEX_REMOVED
,
99 MGMT_EV_CLASS_OF_DEV_CHANGED
,
100 MGMT_EV_LOCAL_NAME_CHANGED
,
101 MGMT_EV_NEW_LINK_KEY
,
102 MGMT_EV_NEW_LONG_TERM_KEY
,
103 MGMT_EV_DEVICE_CONNECTED
,
104 MGMT_EV_DEVICE_DISCONNECTED
,
105 MGMT_EV_CONNECT_FAILED
,
106 MGMT_EV_PIN_CODE_REQUEST
,
107 MGMT_EV_USER_CONFIRM_REQUEST
,
108 MGMT_EV_USER_PASSKEY_REQUEST
,
110 MGMT_EV_DEVICE_FOUND
,
112 MGMT_EV_DEVICE_BLOCKED
,
113 MGMT_EV_DEVICE_UNBLOCKED
,
114 MGMT_EV_DEVICE_UNPAIRED
,
115 MGMT_EV_PASSKEY_NOTIFY
,
118 MGMT_EV_DEVICE_ADDED
,
119 MGMT_EV_DEVICE_REMOVED
,
120 MGMT_EV_NEW_CONN_PARAM
,
123 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
125 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
126 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
129 struct list_head list
;
137 /* HCI to MGMT error code conversion table */
138 static u8 mgmt_status_table
[] = {
140 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
141 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
142 MGMT_STATUS_FAILED
, /* Hardware Failure */
143 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
144 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
145 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
146 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
147 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
148 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
149 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
150 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
151 MGMT_STATUS_BUSY
, /* Command Disallowed */
152 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
153 MGMT_STATUS_REJECTED
, /* Rejected Security */
154 MGMT_STATUS_REJECTED
, /* Rejected Personal */
155 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
156 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
157 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
158 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
159 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
160 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
161 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
162 MGMT_STATUS_BUSY
, /* Repeated Attempts */
163 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
164 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
165 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
166 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
167 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
168 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
169 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
170 MGMT_STATUS_FAILED
, /* Unspecified Error */
171 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
172 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
173 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
174 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
175 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
176 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
177 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
178 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
179 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
180 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
181 MGMT_STATUS_FAILED
, /* Transaction Collision */
182 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
183 MGMT_STATUS_REJECTED
, /* QoS Rejected */
184 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
185 MGMT_STATUS_REJECTED
, /* Insufficient Security */
186 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
187 MGMT_STATUS_BUSY
, /* Role Switch Pending */
188 MGMT_STATUS_FAILED
, /* Slot Violation */
189 MGMT_STATUS_FAILED
, /* Role Switch Failed */
190 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
191 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
192 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
193 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
194 MGMT_STATUS_BUSY
, /* Controller Busy */
195 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
196 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
197 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
198 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
199 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
202 static u8
mgmt_status(u8 hci_status
)
204 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
205 return mgmt_status_table
[hci_status
];
207 return MGMT_STATUS_FAILED
;
210 static int cmd_status(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
)
213 struct mgmt_hdr
*hdr
;
214 struct mgmt_ev_cmd_status
*ev
;
217 BT_DBG("sock %p, index %u, cmd %u, status %u", sk
, index
, cmd
, status
);
219 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
), GFP_KERNEL
);
223 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
225 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_STATUS
);
226 hdr
->index
= cpu_to_le16(index
);
227 hdr
->len
= cpu_to_le16(sizeof(*ev
));
229 ev
= (void *) skb_put(skb
, sizeof(*ev
));
231 ev
->opcode
= cpu_to_le16(cmd
);
233 err
= sock_queue_rcv_skb(sk
, skb
);
240 static int cmd_complete(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
,
241 void *rp
, size_t rp_len
)
244 struct mgmt_hdr
*hdr
;
245 struct mgmt_ev_cmd_complete
*ev
;
248 BT_DBG("sock %p", sk
);
250 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
) + rp_len
, GFP_KERNEL
);
254 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
256 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_COMPLETE
);
257 hdr
->index
= cpu_to_le16(index
);
258 hdr
->len
= cpu_to_le16(sizeof(*ev
) + rp_len
);
260 ev
= (void *) skb_put(skb
, sizeof(*ev
) + rp_len
);
261 ev
->opcode
= cpu_to_le16(cmd
);
265 memcpy(ev
->data
, rp
, rp_len
);
267 err
= sock_queue_rcv_skb(sk
, skb
);
274 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
277 struct mgmt_rp_read_version rp
;
279 BT_DBG("sock %p", sk
);
281 rp
.version
= MGMT_VERSION
;
282 rp
.revision
= cpu_to_le16(MGMT_REVISION
);
284 return cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0, &rp
,
288 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
291 struct mgmt_rp_read_commands
*rp
;
292 const u16 num_commands
= ARRAY_SIZE(mgmt_commands
);
293 const u16 num_events
= ARRAY_SIZE(mgmt_events
);
298 BT_DBG("sock %p", sk
);
300 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
302 rp
= kmalloc(rp_size
, GFP_KERNEL
);
306 rp
->num_commands
= cpu_to_le16(num_commands
);
307 rp
->num_events
= cpu_to_le16(num_events
);
309 for (i
= 0, opcode
= rp
->opcodes
; i
< num_commands
; i
++, opcode
++)
310 put_unaligned_le16(mgmt_commands
[i
], opcode
);
312 for (i
= 0; i
< num_events
; i
++, opcode
++)
313 put_unaligned_le16(mgmt_events
[i
], opcode
);
315 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0, rp
,
322 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
325 struct mgmt_rp_read_index_list
*rp
;
331 BT_DBG("sock %p", sk
);
333 read_lock(&hci_dev_list_lock
);
336 list_for_each_entry(d
, &hci_dev_list
, list
) {
337 if (d
->dev_type
== HCI_BREDR
)
341 rp_len
= sizeof(*rp
) + (2 * count
);
342 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
344 read_unlock(&hci_dev_list_lock
);
349 list_for_each_entry(d
, &hci_dev_list
, list
) {
350 if (test_bit(HCI_SETUP
, &d
->dev_flags
))
353 if (test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
356 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
359 if (d
->dev_type
== HCI_BREDR
) {
360 rp
->index
[count
++] = cpu_to_le16(d
->id
);
361 BT_DBG("Added hci%u", d
->id
);
365 rp
->num_controllers
= cpu_to_le16(count
);
366 rp_len
= sizeof(*rp
) + (2 * count
);
368 read_unlock(&hci_dev_list_lock
);
370 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
, 0, rp
,
378 static u32
get_supported_settings(struct hci_dev
*hdev
)
382 settings
|= MGMT_SETTING_POWERED
;
383 settings
|= MGMT_SETTING_PAIRABLE
;
384 settings
|= MGMT_SETTING_DEBUG_KEYS
;
386 if (lmp_bredr_capable(hdev
)) {
387 settings
|= MGMT_SETTING_CONNECTABLE
;
388 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
389 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
390 settings
|= MGMT_SETTING_DISCOVERABLE
;
391 settings
|= MGMT_SETTING_BREDR
;
392 settings
|= MGMT_SETTING_LINK_SECURITY
;
394 if (lmp_ssp_capable(hdev
)) {
395 settings
|= MGMT_SETTING_SSP
;
396 settings
|= MGMT_SETTING_HS
;
399 if (lmp_sc_capable(hdev
) ||
400 test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
401 settings
|= MGMT_SETTING_SECURE_CONN
;
404 if (lmp_le_capable(hdev
)) {
405 settings
|= MGMT_SETTING_LE
;
406 settings
|= MGMT_SETTING_ADVERTISING
;
407 settings
|= MGMT_SETTING_PRIVACY
;
413 static u32
get_current_settings(struct hci_dev
*hdev
)
417 if (hdev_is_powered(hdev
))
418 settings
|= MGMT_SETTING_POWERED
;
420 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
421 settings
|= MGMT_SETTING_CONNECTABLE
;
423 if (test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
424 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
426 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
427 settings
|= MGMT_SETTING_DISCOVERABLE
;
429 if (test_bit(HCI_PAIRABLE
, &hdev
->dev_flags
))
430 settings
|= MGMT_SETTING_PAIRABLE
;
432 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
433 settings
|= MGMT_SETTING_BREDR
;
435 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
436 settings
|= MGMT_SETTING_LE
;
438 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
))
439 settings
|= MGMT_SETTING_LINK_SECURITY
;
441 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
442 settings
|= MGMT_SETTING_SSP
;
444 if (test_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
))
445 settings
|= MGMT_SETTING_HS
;
447 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
448 settings
|= MGMT_SETTING_ADVERTISING
;
450 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
451 settings
|= MGMT_SETTING_SECURE_CONN
;
453 if (test_bit(HCI_KEEP_DEBUG_KEYS
, &hdev
->dev_flags
))
454 settings
|= MGMT_SETTING_DEBUG_KEYS
;
456 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
))
457 settings
|= MGMT_SETTING_PRIVACY
;
462 #define PNP_INFO_SVCLASS_ID 0x1200
464 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
466 u8
*ptr
= data
, *uuids_start
= NULL
;
467 struct bt_uuid
*uuid
;
472 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
475 if (uuid
->size
!= 16)
478 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
482 if (uuid16
== PNP_INFO_SVCLASS_ID
)
488 uuids_start
[1] = EIR_UUID16_ALL
;
492 /* Stop if not enough space to put next UUID */
493 if ((ptr
- data
) + sizeof(u16
) > len
) {
494 uuids_start
[1] = EIR_UUID16_SOME
;
498 *ptr
++ = (uuid16
& 0x00ff);
499 *ptr
++ = (uuid16
& 0xff00) >> 8;
500 uuids_start
[0] += sizeof(uuid16
);
506 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
508 u8
*ptr
= data
, *uuids_start
= NULL
;
509 struct bt_uuid
*uuid
;
514 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
515 if (uuid
->size
!= 32)
521 uuids_start
[1] = EIR_UUID32_ALL
;
525 /* Stop if not enough space to put next UUID */
526 if ((ptr
- data
) + sizeof(u32
) > len
) {
527 uuids_start
[1] = EIR_UUID32_SOME
;
531 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
533 uuids_start
[0] += sizeof(u32
);
539 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
541 u8
*ptr
= data
, *uuids_start
= NULL
;
542 struct bt_uuid
*uuid
;
547 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
548 if (uuid
->size
!= 128)
554 uuids_start
[1] = EIR_UUID128_ALL
;
558 /* Stop if not enough space to put next UUID */
559 if ((ptr
- data
) + 16 > len
) {
560 uuids_start
[1] = EIR_UUID128_SOME
;
564 memcpy(ptr
, uuid
->uuid
, 16);
566 uuids_start
[0] += 16;
572 static struct pending_cmd
*mgmt_pending_find(u16 opcode
, struct hci_dev
*hdev
)
574 struct pending_cmd
*cmd
;
576 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
577 if (cmd
->opcode
== opcode
)
584 static struct pending_cmd
*mgmt_pending_find_data(u16 opcode
,
585 struct hci_dev
*hdev
,
588 struct pending_cmd
*cmd
;
590 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
591 if (cmd
->user_data
!= data
)
593 if (cmd
->opcode
== opcode
)
600 static u8
create_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
605 name_len
= strlen(hdev
->dev_name
);
607 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
609 if (name_len
> max_len
) {
611 ptr
[1] = EIR_NAME_SHORT
;
613 ptr
[1] = EIR_NAME_COMPLETE
;
615 ptr
[0] = name_len
+ 1;
617 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
619 ad_len
+= (name_len
+ 2);
620 ptr
+= (name_len
+ 2);
626 static void update_scan_rsp_data(struct hci_request
*req
)
628 struct hci_dev
*hdev
= req
->hdev
;
629 struct hci_cp_le_set_scan_rsp_data cp
;
632 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
635 memset(&cp
, 0, sizeof(cp
));
637 len
= create_scan_rsp_data(hdev
, cp
.data
);
639 if (hdev
->scan_rsp_data_len
== len
&&
640 memcmp(cp
.data
, hdev
->scan_rsp_data
, len
) == 0)
643 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
644 hdev
->scan_rsp_data_len
= len
;
648 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
651 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
653 struct pending_cmd
*cmd
;
655 /* If there's a pending mgmt command the flags will not yet have
656 * their final values, so check for this first.
658 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
660 struct mgmt_mode
*cp
= cmd
->param
;
662 return LE_AD_GENERAL
;
663 else if (cp
->val
== 0x02)
664 return LE_AD_LIMITED
;
666 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
667 return LE_AD_LIMITED
;
668 else if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
669 return LE_AD_GENERAL
;
675 static u8
create_adv_data(struct hci_dev
*hdev
, u8
*ptr
)
677 u8 ad_len
= 0, flags
= 0;
679 flags
|= get_adv_discov_flags(hdev
);
681 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
682 flags
|= LE_AD_NO_BREDR
;
685 BT_DBG("adv flags 0x%02x", flags
);
695 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
697 ptr
[1] = EIR_TX_POWER
;
698 ptr
[2] = (u8
) hdev
->adv_tx_power
;
707 static void update_adv_data(struct hci_request
*req
)
709 struct hci_dev
*hdev
= req
->hdev
;
710 struct hci_cp_le_set_adv_data cp
;
713 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
716 memset(&cp
, 0, sizeof(cp
));
718 len
= create_adv_data(hdev
, cp
.data
);
720 if (hdev
->adv_data_len
== len
&&
721 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
724 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
725 hdev
->adv_data_len
= len
;
729 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
732 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
737 name_len
= strlen(hdev
->dev_name
);
743 ptr
[1] = EIR_NAME_SHORT
;
745 ptr
[1] = EIR_NAME_COMPLETE
;
747 /* EIR Data length */
748 ptr
[0] = name_len
+ 1;
750 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
752 ptr
+= (name_len
+ 2);
755 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
757 ptr
[1] = EIR_TX_POWER
;
758 ptr
[2] = (u8
) hdev
->inq_tx_power
;
763 if (hdev
->devid_source
> 0) {
765 ptr
[1] = EIR_DEVICE_ID
;
767 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
768 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
769 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
770 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
775 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
776 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
777 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
780 static void update_eir(struct hci_request
*req
)
782 struct hci_dev
*hdev
= req
->hdev
;
783 struct hci_cp_write_eir cp
;
785 if (!hdev_is_powered(hdev
))
788 if (!lmp_ext_inq_capable(hdev
))
791 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
794 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
797 memset(&cp
, 0, sizeof(cp
));
799 create_eir(hdev
, cp
.data
);
801 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
804 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
806 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
809 static u8
get_service_classes(struct hci_dev
*hdev
)
811 struct bt_uuid
*uuid
;
814 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
815 val
|= uuid
->svc_hint
;
820 static void update_class(struct hci_request
*req
)
822 struct hci_dev
*hdev
= req
->hdev
;
825 BT_DBG("%s", hdev
->name
);
827 if (!hdev_is_powered(hdev
))
830 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
833 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
836 cod
[0] = hdev
->minor_class
;
837 cod
[1] = hdev
->major_class
;
838 cod
[2] = get_service_classes(hdev
);
840 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
843 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
846 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
849 static bool get_connectable(struct hci_dev
*hdev
)
851 struct pending_cmd
*cmd
;
853 /* If there's a pending mgmt command the flag will not yet have
854 * it's final value, so check for this first.
856 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
858 struct mgmt_mode
*cp
= cmd
->param
;
862 return test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
865 static void enable_advertising(struct hci_request
*req
)
867 struct hci_dev
*hdev
= req
->hdev
;
868 struct hci_cp_le_set_adv_param cp
;
869 u8 own_addr_type
, enable
= 0x01;
872 /* Clear the HCI_ADVERTISING bit temporarily so that the
873 * hci_update_random_address knows that it's safe to go ahead
874 * and write a new random address. The flag will be set back on
875 * as soon as the SET_ADV_ENABLE HCI command completes.
877 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
879 connectable
= get_connectable(hdev
);
881 /* Set require_privacy to true only when non-connectable
882 * advertising is used. In that case it is fine to use a
883 * non-resolvable private address.
885 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
888 memset(&cp
, 0, sizeof(cp
));
889 cp
.min_interval
= cpu_to_le16(0x0800);
890 cp
.max_interval
= cpu_to_le16(0x0800);
891 cp
.type
= connectable
? LE_ADV_IND
: LE_ADV_NONCONN_IND
;
892 cp
.own_address_type
= own_addr_type
;
893 cp
.channel_map
= hdev
->le_adv_channel_map
;
895 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
897 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
900 static void disable_advertising(struct hci_request
*req
)
904 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
907 static void service_cache_off(struct work_struct
*work
)
909 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
911 struct hci_request req
;
913 if (!test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
916 hci_req_init(&req
, hdev
);
923 hci_dev_unlock(hdev
);
925 hci_req_run(&req
, NULL
);
928 static void rpa_expired(struct work_struct
*work
)
930 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
932 struct hci_request req
;
936 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
938 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) ||
939 hci_conn_num(hdev
, LE_LINK
) > 0)
942 /* The generation of a new RPA and programming it into the
943 * controller happens in the enable_advertising() function.
946 hci_req_init(&req
, hdev
);
948 disable_advertising(&req
);
949 enable_advertising(&req
);
951 hci_req_run(&req
, NULL
);
954 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
956 if (test_and_set_bit(HCI_MGMT
, &hdev
->dev_flags
))
959 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
960 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
962 /* Non-mgmt controlled devices get this bit set
963 * implicitly so that pairing works for them, however
964 * for mgmt we require user-space to explicitly enable
967 clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
970 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
971 void *data
, u16 data_len
)
973 struct mgmt_rp_read_info rp
;
975 BT_DBG("sock %p %s", sk
, hdev
->name
);
979 memset(&rp
, 0, sizeof(rp
));
981 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
983 rp
.version
= hdev
->hci_ver
;
984 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
986 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
987 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
989 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
991 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
992 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
994 hci_dev_unlock(hdev
);
996 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1000 static void mgmt_pending_free(struct pending_cmd
*cmd
)
1007 static struct pending_cmd
*mgmt_pending_add(struct sock
*sk
, u16 opcode
,
1008 struct hci_dev
*hdev
, void *data
,
1011 struct pending_cmd
*cmd
;
1013 cmd
= kzalloc(sizeof(*cmd
), GFP_KERNEL
);
1017 cmd
->opcode
= opcode
;
1018 cmd
->index
= hdev
->id
;
1020 cmd
->param
= kmalloc(len
, GFP_KERNEL
);
1027 memcpy(cmd
->param
, data
, len
);
1032 list_add(&cmd
->list
, &hdev
->mgmt_pending
);
1037 static void mgmt_pending_foreach(u16 opcode
, struct hci_dev
*hdev
,
1038 void (*cb
)(struct pending_cmd
*cmd
,
1042 struct pending_cmd
*cmd
, *tmp
;
1044 list_for_each_entry_safe(cmd
, tmp
, &hdev
->mgmt_pending
, list
) {
1045 if (opcode
> 0 && cmd
->opcode
!= opcode
)
1052 static void mgmt_pending_remove(struct pending_cmd
*cmd
)
1054 list_del(&cmd
->list
);
1055 mgmt_pending_free(cmd
);
1058 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1060 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1062 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1066 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
)
1068 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1070 if (hci_conn_count(hdev
) == 0) {
1071 cancel_delayed_work(&hdev
->power_off
);
1072 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1076 static void hci_stop_discovery(struct hci_request
*req
)
1078 struct hci_dev
*hdev
= req
->hdev
;
1079 struct hci_cp_remote_name_req_cancel cp
;
1080 struct inquiry_entry
*e
;
1082 switch (hdev
->discovery
.state
) {
1083 case DISCOVERY_FINDING
:
1084 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
1085 hci_req_add(req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
1087 cancel_delayed_work(&hdev
->le_scan_disable
);
1088 hci_req_add_le_scan_disable(req
);
1093 case DISCOVERY_RESOLVING
:
1094 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
1099 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
1100 hci_req_add(req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
1106 /* Passive scanning */
1107 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1108 hci_req_add_le_scan_disable(req
);
1113 static int clean_up_hci_state(struct hci_dev
*hdev
)
1115 struct hci_request req
;
1116 struct hci_conn
*conn
;
1118 hci_req_init(&req
, hdev
);
1120 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1121 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1123 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1126 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1127 disable_advertising(&req
);
1129 hci_stop_discovery(&req
);
1131 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1132 struct hci_cp_disconnect dc
;
1133 struct hci_cp_reject_conn_req rej
;
1135 switch (conn
->state
) {
1138 dc
.handle
= cpu_to_le16(conn
->handle
);
1139 dc
.reason
= 0x15; /* Terminated due to Power Off */
1140 hci_req_add(&req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
1143 if (conn
->type
== LE_LINK
)
1144 hci_req_add(&req
, HCI_OP_LE_CREATE_CONN_CANCEL
,
1146 else if (conn
->type
== ACL_LINK
)
1147 hci_req_add(&req
, HCI_OP_CREATE_CONN_CANCEL
,
1151 bacpy(&rej
.bdaddr
, &conn
->dst
);
1152 rej
.reason
= 0x15; /* Terminated due to Power Off */
1153 if (conn
->type
== ACL_LINK
)
1154 hci_req_add(&req
, HCI_OP_REJECT_CONN_REQ
,
1156 else if (conn
->type
== SCO_LINK
)
1157 hci_req_add(&req
, HCI_OP_REJECT_SYNC_CONN_REQ
,
1163 return hci_req_run(&req
, clean_up_hci_complete
);
1166 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1169 struct mgmt_mode
*cp
= data
;
1170 struct pending_cmd
*cmd
;
1173 BT_DBG("request for %s", hdev
->name
);
1175 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1176 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1177 MGMT_STATUS_INVALID_PARAMS
);
1181 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1182 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1187 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1188 cancel_delayed_work(&hdev
->power_off
);
1191 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1193 err
= mgmt_powered(hdev
, 1);
1198 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1199 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1203 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1210 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1213 /* Disconnect connections, stop scans, etc */
1214 err
= clean_up_hci_state(hdev
);
1216 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1217 HCI_POWER_OFF_TIMEOUT
);
1219 /* ENODATA means there were no HCI commands queued */
1220 if (err
== -ENODATA
) {
1221 cancel_delayed_work(&hdev
->power_off
);
1222 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1228 hci_dev_unlock(hdev
);
1232 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 data_len
,
1233 struct sock
*skip_sk
)
1235 struct sk_buff
*skb
;
1236 struct mgmt_hdr
*hdr
;
1238 skb
= alloc_skb(sizeof(*hdr
) + data_len
, GFP_KERNEL
);
1242 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
1243 hdr
->opcode
= cpu_to_le16(event
);
1245 hdr
->index
= cpu_to_le16(hdev
->id
);
1247 hdr
->index
= cpu_to_le16(MGMT_INDEX_NONE
);
1248 hdr
->len
= cpu_to_le16(data_len
);
1251 memcpy(skb_put(skb
, data_len
), data
, data_len
);
1254 __net_timestamp(skb
);
1256 hci_send_to_control(skb
, skip_sk
);
1262 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1266 ev
= cpu_to_le32(get_current_settings(hdev
));
1268 return mgmt_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
, sizeof(ev
), skip
);
1273 struct hci_dev
*hdev
;
1277 static void settings_rsp(struct pending_cmd
*cmd
, void *data
)
1279 struct cmd_lookup
*match
= data
;
1281 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1283 list_del(&cmd
->list
);
1285 if (match
->sk
== NULL
) {
1286 match
->sk
= cmd
->sk
;
1287 sock_hold(match
->sk
);
1290 mgmt_pending_free(cmd
);
1293 static void cmd_status_rsp(struct pending_cmd
*cmd
, void *data
)
1297 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1298 mgmt_pending_remove(cmd
);
1301 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1303 if (!lmp_bredr_capable(hdev
))
1304 return MGMT_STATUS_NOT_SUPPORTED
;
1305 else if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1306 return MGMT_STATUS_REJECTED
;
1308 return MGMT_STATUS_SUCCESS
;
1311 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1313 if (!lmp_le_capable(hdev
))
1314 return MGMT_STATUS_NOT_SUPPORTED
;
1315 else if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
1316 return MGMT_STATUS_REJECTED
;
1318 return MGMT_STATUS_SUCCESS
;
1321 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1323 struct pending_cmd
*cmd
;
1324 struct mgmt_mode
*cp
;
1325 struct hci_request req
;
1328 BT_DBG("status 0x%02x", status
);
1332 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1337 u8 mgmt_err
= mgmt_status(status
);
1338 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1339 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1345 changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
1348 if (hdev
->discov_timeout
> 0) {
1349 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1350 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1354 changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1358 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1361 new_settings(hdev
, cmd
->sk
);
1363 /* When the discoverable mode gets changed, make sure
1364 * that class of device has the limited discoverable
1365 * bit correctly set.
1367 hci_req_init(&req
, hdev
);
1369 hci_req_run(&req
, NULL
);
1372 mgmt_pending_remove(cmd
);
1375 hci_dev_unlock(hdev
);
1378 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1381 struct mgmt_cp_set_discoverable
*cp
= data
;
1382 struct pending_cmd
*cmd
;
1383 struct hci_request req
;
1388 BT_DBG("request for %s", hdev
->name
);
1390 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1391 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1392 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1393 MGMT_STATUS_REJECTED
);
1395 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1396 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1397 MGMT_STATUS_INVALID_PARAMS
);
1399 timeout
= __le16_to_cpu(cp
->timeout
);
1401 /* Disabling discoverable requires that no timeout is set,
1402 * and enabling limited discoverable requires a timeout.
1404 if ((cp
->val
== 0x00 && timeout
> 0) ||
1405 (cp
->val
== 0x02 && timeout
== 0))
1406 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1407 MGMT_STATUS_INVALID_PARAMS
);
1411 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1412 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1413 MGMT_STATUS_NOT_POWERED
);
1417 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1418 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1419 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1424 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
)) {
1425 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1426 MGMT_STATUS_REJECTED
);
1430 if (!hdev_is_powered(hdev
)) {
1431 bool changed
= false;
1433 /* Setting limited discoverable when powered off is
1434 * not a valid operation since it requires a timeout
1435 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1437 if (!!cp
->val
!= test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
)) {
1438 change_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1442 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1447 err
= new_settings(hdev
, sk
);
1452 /* If the current mode is the same, then just update the timeout
1453 * value with the new value. And if only the timeout gets updated,
1454 * then no need for any HCI transactions.
1456 if (!!cp
->val
== test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
) &&
1457 (cp
->val
== 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE
,
1458 &hdev
->dev_flags
)) {
1459 cancel_delayed_work(&hdev
->discov_off
);
1460 hdev
->discov_timeout
= timeout
;
1462 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1463 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1464 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1468 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1472 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1478 /* Cancel any potential discoverable timeout that might be
1479 * still active and store new timeout value. The arming of
1480 * the timeout happens in the complete handler.
1482 cancel_delayed_work(&hdev
->discov_off
);
1483 hdev
->discov_timeout
= timeout
;
1485 /* Limited discoverable mode */
1486 if (cp
->val
== 0x02)
1487 set_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1489 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1491 hci_req_init(&req
, hdev
);
1493 /* The procedure for LE-only controllers is much simpler - just
1494 * update the advertising data.
1496 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1502 struct hci_cp_write_current_iac_lap hci_cp
;
1504 if (cp
->val
== 0x02) {
1505 /* Limited discoverable mode */
1506 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1507 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1508 hci_cp
.iac_lap
[1] = 0x8b;
1509 hci_cp
.iac_lap
[2] = 0x9e;
1510 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1511 hci_cp
.iac_lap
[4] = 0x8b;
1512 hci_cp
.iac_lap
[5] = 0x9e;
1514 /* General discoverable mode */
1516 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1517 hci_cp
.iac_lap
[1] = 0x8b;
1518 hci_cp
.iac_lap
[2] = 0x9e;
1521 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1522 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1524 scan
|= SCAN_INQUIRY
;
1526 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1529 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1532 update_adv_data(&req
);
1534 err
= hci_req_run(&req
, set_discoverable_complete
);
1536 mgmt_pending_remove(cmd
);
1539 hci_dev_unlock(hdev
);
/* Queue HCI page-scan parameter updates onto @req to toggle "fast
 * connectable" mode. Interlaced scan with a 160 msec interval when
 * @enable, standard scan with the default 1.28 sec interval otherwise.
 * Commands are only queued when the values actually change, so adding
 * this to a request is cheap when nothing needs updating.
 * NOTE(review): the extraction dropped interior source lines here
 * (numbering jumps, e.g. the early returns after the capability checks
 * and the if (enable) branch header) — confirm against the upstream
 * file before editing.
 */
1543 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1545 struct hci_dev
*hdev
= req
->hdev
;
1546 struct hci_cp_write_page_scan_activity acp
;
/* Page scan only applies to BR/EDR; bail out when it is disabled. */
1549 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
/* Interlaced page scan needs at least Bluetooth 1.2 controllers. */
1552 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1556 type
= PAGE_SCAN_TYPE_INTERLACED
;
1558 /* 160 msec page scan interval */
1559 acp
.interval
= cpu_to_le16(0x0100);
1561 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1563 /* default 1.28 sec page scan */
1564 acp
.interval
= cpu_to_le16(0x0800);
/* 11.25 msec page scan window in both modes. */
1567 acp
.window
= cpu_to_le16(0x0012);
/* Only send the HCI command when interval/window differ from the
 * controller's current (cached) values.
 */
1569 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1570 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1571 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
/* Likewise, only update the scan type when it changed. */
1574 if (hdev
->page_scan_type
!= type
)
1575 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE.
 * On controller error, reply to the pending command with the mapped
 * mgmt status; on success, update the HCI_CONNECTABLE flag, send the
 * settings response, and broadcast New Settings if the flag changed.
 * NOTE(review): extraction dropped interior lines (the locks, the
 * NULL check on cmd, goto labels) — numbering gaps mark the elisions.
 */
1578 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1580 struct pending_cmd
*cmd
;
1581 struct mgmt_mode
*cp
;
1584 BT_DBG("status 0x%02x", status
);
/* Look up the pending mgmt command that triggered this request. */
1588 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1593 u8 mgmt_err
= mgmt_status(status
);
1594 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
/* test_and_* tell us whether the flag actually flipped, which decides
 * whether a New Settings event must be emitted below.
 */
1600 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1602 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1604 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1607 new_settings(hdev
, cmd
->sk
);
1610 mgmt_pending_remove(cmd
);
1613 hci_dev_unlock(hdev
);
/* Update the HCI_CONNECTABLE flag purely in software (no HCI traffic),
 * used when the controller is powered off or when no HCI command ended
 * up being needed. Clearing connectable also clears HCI_DISCOVERABLE,
 * since a non-connectable device cannot remain discoverable. Sends the
 * settings response and, when the flag changed, a New Settings event.
 * Returns 0 or a negative errno from the response helpers.
 * NOTE(review): interior lines (braces, the early return path) were
 * dropped by the extraction — see the jumps in the embedded numbering.
 */
1616 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1617 struct sock
*sk
, u8 val
)
1619 bool changed
= false;
/* !!val normalizes to 0/1 before comparing with the current flag. */
1622 if (!!val
!= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
1626 set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1628 clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
/* Dropping connectable implies dropping discoverable as well. */
1629 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1632 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1637 return new_settings(hdev
, sk
);
1642 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1645 struct mgmt_mode
*cp
= data
;
1646 struct pending_cmd
*cmd
;
1647 struct hci_request req
;
1651 BT_DBG("request for %s", hdev
->name
);
1653 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1654 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1655 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1656 MGMT_STATUS_REJECTED
);
1658 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1659 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1660 MGMT_STATUS_INVALID_PARAMS
);
1664 if (!hdev_is_powered(hdev
)) {
1665 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1669 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1670 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1671 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1676 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1682 hci_req_init(&req
, hdev
);
1684 /* If BR/EDR is not enabled and we disable advertising as a
1685 * by-product of disabling connectable, we need to update the
1686 * advertising flags.
1688 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1690 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1691 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1693 update_adv_data(&req
);
1694 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1700 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
1701 hdev
->discov_timeout
> 0)
1702 cancel_delayed_work(&hdev
->discov_off
);
1705 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1708 /* If we're going from non-connectable to connectable or
1709 * vice-versa when fast connectable is enabled ensure that fast
1710 * connectable gets disabled. write_fast_connectable won't do
1711 * anything if the page scan parameters are already what they
1714 if (cp
->val
|| test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
1715 write_fast_connectable(&req
, false);
1717 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) &&
1718 hci_conn_num(hdev
, LE_LINK
) == 0) {
1719 disable_advertising(&req
);
1720 enable_advertising(&req
);
1723 err
= hci_req_run(&req
, set_connectable_complete
);
1725 mgmt_pending_remove(cmd
);
1726 if (err
== -ENODATA
)
1727 err
= set_connectable_update_settings(hdev
, sk
,
1733 hci_dev_unlock(hdev
);
/* MGMT_OP_SET_PAIRABLE handler: toggles the HCI_PAIRABLE flag. This is
 * a host-side setting only — no HCI command is sent — so it takes
 * effect immediately regardless of power state. Rejects any value
 * other than 0x00/0x01, replies with the current settings, and emits
 * New Settings when the flag actually changed.
 * NOTE(review): locks, braces and the final return were dropped by the
 * extraction (numbering gaps); tokens below are untouched.
 */
1737 static int set_pairable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1740 struct mgmt_mode
*cp
= data
;
1744 BT_DBG("request for %s", hdev
->name
);
/* Only boolean values are valid for this mode setting. */
1746 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1747 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PAIRABLE
,
1748 MGMT_STATUS_INVALID_PARAMS
);
/* The test_and_* result drives whether New Settings is sent below. */
1753 changed
= !test_and_set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1755 changed
= test_and_clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1757 err
= send_settings_rsp(sk
, MGMT_OP_SET_PAIRABLE
, hdev
);
1762 err
= new_settings(hdev
, sk
);
1765 hci_dev_unlock(hdev
);
1769 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1772 struct mgmt_mode
*cp
= data
;
1773 struct pending_cmd
*cmd
;
1777 BT_DBG("request for %s", hdev
->name
);
1779 status
= mgmt_bredr_support(hdev
);
1781 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1784 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1785 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1786 MGMT_STATUS_INVALID_PARAMS
);
1790 if (!hdev_is_powered(hdev
)) {
1791 bool changed
= false;
1793 if (!!cp
->val
!= test_bit(HCI_LINK_SECURITY
,
1794 &hdev
->dev_flags
)) {
1795 change_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
1799 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1804 err
= new_settings(hdev
, sk
);
1809 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1810 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1817 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1818 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1822 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1828 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1830 mgmt_pending_remove(cmd
);
1835 hci_dev_unlock(hdev
);
1839 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1841 struct mgmt_mode
*cp
= data
;
1842 struct pending_cmd
*cmd
;
1846 BT_DBG("request for %s", hdev
->name
);
1848 status
= mgmt_bredr_support(hdev
);
1850 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1852 if (!lmp_ssp_capable(hdev
))
1853 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1854 MGMT_STATUS_NOT_SUPPORTED
);
1856 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1857 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1858 MGMT_STATUS_INVALID_PARAMS
);
1862 if (!hdev_is_powered(hdev
)) {
1866 changed
= !test_and_set_bit(HCI_SSP_ENABLED
,
1869 changed
= test_and_clear_bit(HCI_SSP_ENABLED
,
1872 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
1875 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1878 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1883 err
= new_settings(hdev
, sk
);
1888 if (mgmt_pending_find(MGMT_OP_SET_SSP
, hdev
) ||
1889 mgmt_pending_find(MGMT_OP_SET_HS
, hdev
)) {
1890 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1895 if (!!cp
->val
== test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
1896 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1900 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
1906 if (!cp
->val
&& test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
1907 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
1908 sizeof(cp
->val
), &cp
->val
);
1910 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
1912 mgmt_pending_remove(cmd
);
1917 hci_dev_unlock(hdev
);
/* MGMT_OP_SET_HS handler: toggles the High Speed (AMP) host flag.
 * Preconditions: BR/EDR support, SSP-capable controller, and SSP
 * currently enabled — HS signaling runs over SSP-secured links.
 * Enabling while powered appears to be rejected (see the powered
 * check below); the flag itself is host-side only, so no HCI command
 * is issued here. Replies with settings and emits New Settings when
 * the flag changed.
 * NOTE(review): interior lines (locks, braces, goto labels, the final
 * return) were dropped by the extraction — confirm against upstream.
 */
1921 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1923 struct mgmt_mode
*cp
= data
;
1928 BT_DBG("request for %s", hdev
->name
);
/* mgmt_bredr_support() returns a non-zero mgmt status on failure. */
1930 status
= mgmt_bredr_support(hdev
);
1932 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
1934 if (!lmp_ssp_capable(hdev
))
1935 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1936 MGMT_STATUS_NOT_SUPPORTED
);
/* HS requires SSP to be enabled first. */
1938 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
1939 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1940 MGMT_STATUS_REJECTED
);
1942 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1943 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1944 MGMT_STATUS_INVALID_PARAMS
);
1949 changed
= !test_and_set_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1951 if (hdev_is_powered(hdev
)) {
1952 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1953 MGMT_STATUS_REJECTED
);
1957 changed
= test_and_clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
1960 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
1965 err
= new_settings(hdev
, sk
);
1968 hci_dev_unlock(hdev
);
/* HCI request completion handler for MGMT_OP_SET_LE. On error, fail
 * every pending Set LE command with the mapped mgmt status; on
 * success, reply to them with the new settings and broadcast New
 * Settings. Afterwards, if LE ended up enabled, refresh the
 * advertising and scan-response data so the controller has sane
 * defaults (power-on handles this separately in powered_update_hci,
 * per the original comment retained below).
 * NOTE(review): braces, locks and early-return lines were dropped by
 * the extraction (numbering gaps).
 */
1972 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
)
1974 struct cmd_lookup match
= { NULL
, hdev
};
1977 u8 mgmt_err
= mgmt_status(status
);
/* Error path: status-reply every pending SET_LE command. */
1979 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
/* Success path: settings-reply them and note the last socket in
 * match.sk so it can be skipped by the New Settings broadcast.
 */
1984 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
1986 new_settings(hdev
, match
.sk
);
1991 /* Make sure the controller has a good default for
1992 * advertising data. Restrict the update to when LE
1993 * has actually been enabled. During power on, the
1994 * update in powered_update_hci will take care of it.
1996 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1997 struct hci_request req
;
2001 hci_req_init(&req
, hdev
);
2002 update_adv_data(&req
);
2003 update_scan_rsp_data(&req
);
/* Fire-and-forget: no completion callback needed for this refresh. */
2004 hci_req_run(&req
, NULL
);
2006 hci_dev_unlock(hdev
);
2010 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2012 struct mgmt_mode
*cp
= data
;
2013 struct hci_cp_write_le_host_supported hci_cp
;
2014 struct pending_cmd
*cmd
;
2015 struct hci_request req
;
2019 BT_DBG("request for %s", hdev
->name
);
2021 if (!lmp_le_capable(hdev
))
2022 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2023 MGMT_STATUS_NOT_SUPPORTED
);
2025 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2026 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2027 MGMT_STATUS_INVALID_PARAMS
);
2029 /* LE-only devices do not allow toggling LE on/off */
2030 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
2031 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2032 MGMT_STATUS_REJECTED
);
2037 enabled
= lmp_host_le_capable(hdev
);
2039 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2040 bool changed
= false;
2042 if (val
!= test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2043 change_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
2047 if (!val
&& test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
2048 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
2052 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2057 err
= new_settings(hdev
, sk
);
2062 if (mgmt_pending_find(MGMT_OP_SET_LE
, hdev
) ||
2063 mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2064 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2069 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2075 hci_req_init(&req
, hdev
);
2077 memset(&hci_cp
, 0, sizeof(hci_cp
));
2081 hci_cp
.simul
= lmp_le_br_capable(hdev
);
2083 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
2084 disable_advertising(&req
);
2087 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2090 err
= hci_req_run(&req
, le_enable_complete
);
2092 mgmt_pending_remove(cmd
);
2095 hci_dev_unlock(hdev
);
2099 /* This is a helper function to test for pending mgmt commands that can
2100 * cause CoD or EIR HCI commands. We can only allow one such pending
2101 * mgmt command at a time since otherwise we cannot easily track what
2102 * the current values are, will be, and based on that calculate if a new
2103 * HCI command needs to be sent and if yes with what value.
2105 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2107 struct pending_cmd
*cmd
;
/* Walk the pending mgmt commands looking for any opcode that may
 * touch the class of device or EIR data.
 * NOTE(review): the return statements inside/after the loop were
 * dropped by the extraction — presumably "return true" on a match and
 * "return false" at the end; confirm against upstream.
 */
2109 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2110 switch (cmd
->opcode
) {
2111 case MGMT_OP_ADD_UUID
:
2112 case MGMT_OP_REMOVE_UUID
:
2113 case MGMT_OP_SET_DEV_CLASS
:
2114 case MGMT_OP_SET_POWERED
:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. 16- and 32-bit UUIDs are aliases of this
 * base with only the bytes at offset 12..15 differing, which is what
 * get_uuid_size() below relies on.
 */
2122 static const u8 bluetooth_base_uuid
[] = {
2123 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2124 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID as 16, 32 or 128 bits wide.
 * If the low 12 bytes differ from the Bluetooth Base UUID it is a
 * true 128-bit UUID; otherwise the 32-bit value at offset 12 decides
 * (fits in 16 bits → 16, else 32).
 * NOTE(review): the actual "return 128/32/16" lines were dropped by
 * the extraction — only the checks survive; confirm against upstream.
 */
2127 static u8
get_uuid_size(const u8
*uuid
)
2131 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
/* get_unaligned_le32: the UUID buffer has no alignment guarantee. */
2134 val
= get_unaligned_le32(&uuid
[12]);
/* Shared completion helper for the class/EIR-affecting commands
 * (Add/Remove UUID, Set Device Class). Finds the pending command for
 * @mgmt_op, replies to it with the mapped status plus the current
 * 3-byte class of device, and removes it from the pending list.
 * NOTE(review): the hci_dev_lock() call and the NULL check on cmd
 * were dropped by the extraction (numbering jumps 2143→2147→2151).
 */
2141 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2143 struct pending_cmd
*cmd
;
2147 cmd
= mgmt_pending_find(mgmt_op
, hdev
);
/* Reply carries the (possibly updated) class of device. */
2151 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
2152 hdev
->dev_class
, 3);
2154 mgmt_pending_remove(cmd
);
2157 hci_dev_unlock(hdev
);
/* HCI request completion callback for Add UUID: forwards the status
 * to the shared class/EIR completion handler for MGMT_OP_ADD_UUID.
 */
2160 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2162 BT_DBG("status 0x%02x", status
);
2164 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2167 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2169 struct mgmt_cp_add_uuid
*cp
= data
;
2170 struct pending_cmd
*cmd
;
2171 struct hci_request req
;
2172 struct bt_uuid
*uuid
;
2175 BT_DBG("request for %s", hdev
->name
);
2179 if (pending_eir_or_class(hdev
)) {
2180 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2185 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2191 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2192 uuid
->svc_hint
= cp
->svc_hint
;
2193 uuid
->size
= get_uuid_size(cp
->uuid
);
2195 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2197 hci_req_init(&req
, hdev
);
2202 err
= hci_req_run(&req
, add_uuid_complete
);
2204 if (err
!= -ENODATA
)
2207 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2208 hdev
->dev_class
, 3);
2212 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2221 hci_dev_unlock(hdev
);
/* Arm the service cache: when the device is powered and the
 * HCI_SERVICE_CACHE flag was not already set, schedule the delayed
 * service_cache work. Returns whether caching is in effect (the
 * return statements were dropped by the extraction — presumably
 * false when powered off or already armed; confirm against upstream).
 */
2225 static bool enable_service_cache(struct hci_dev
*hdev
)
2227 if (!hdev_is_powered(hdev
))
/* test_and_set ensures the work is only queued on the 0→1 edge. */
2230 if (!test_and_set_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2231 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
/* HCI request completion callback for Remove UUID: forwards the
 * status to the shared class/EIR handler for MGMT_OP_REMOVE_UUID.
 */
2239 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2241 BT_DBG("status 0x%02x", status
);
2243 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2246 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2249 struct mgmt_cp_remove_uuid
*cp
= data
;
2250 struct pending_cmd
*cmd
;
2251 struct bt_uuid
*match
, *tmp
;
2252 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2253 struct hci_request req
;
2256 BT_DBG("request for %s", hdev
->name
);
2260 if (pending_eir_or_class(hdev
)) {
2261 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2266 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2267 hci_uuids_clear(hdev
);
2269 if (enable_service_cache(hdev
)) {
2270 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2271 0, hdev
->dev_class
, 3);
2280 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2281 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2284 list_del(&match
->list
);
2290 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2291 MGMT_STATUS_INVALID_PARAMS
);
2296 hci_req_init(&req
, hdev
);
2301 err
= hci_req_run(&req
, remove_uuid_complete
);
2303 if (err
!= -ENODATA
)
2306 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2307 hdev
->dev_class
, 3);
2311 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2320 hci_dev_unlock(hdev
);
/* HCI request completion callback for Set Device Class: forwards the
 * status to the shared class/EIR handler for MGMT_OP_SET_DEV_CLASS.
 */
2324 static void set_class_complete(struct hci_dev
*hdev
, u8 status
)
2326 BT_DBG("status 0x%02x", status
);
2328 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2331 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2334 struct mgmt_cp_set_dev_class
*cp
= data
;
2335 struct pending_cmd
*cmd
;
2336 struct hci_request req
;
2339 BT_DBG("request for %s", hdev
->name
);
2341 if (!lmp_bredr_capable(hdev
))
2342 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2343 MGMT_STATUS_NOT_SUPPORTED
);
2347 if (pending_eir_or_class(hdev
)) {
2348 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2353 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2354 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2355 MGMT_STATUS_INVALID_PARAMS
);
2359 hdev
->major_class
= cp
->major
;
2360 hdev
->minor_class
= cp
->minor
;
2362 if (!hdev_is_powered(hdev
)) {
2363 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2364 hdev
->dev_class
, 3);
2368 hci_req_init(&req
, hdev
);
2370 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2371 hci_dev_unlock(hdev
);
2372 cancel_delayed_work_sync(&hdev
->service_cache
);
2379 err
= hci_req_run(&req
, set_class_complete
);
2381 if (err
!= -ENODATA
)
2384 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2385 hdev
->dev_class
, 3);
2389 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2398 hci_dev_unlock(hdev
);
2402 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2405 struct mgmt_cp_load_link_keys
*cp
= data
;
2406 u16 key_count
, expected_len
;
2410 BT_DBG("request for %s", hdev
->name
);
2412 if (!lmp_bredr_capable(hdev
))
2413 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2414 MGMT_STATUS_NOT_SUPPORTED
);
2416 key_count
= __le16_to_cpu(cp
->key_count
);
2418 expected_len
= sizeof(*cp
) + key_count
*
2419 sizeof(struct mgmt_link_key_info
);
2420 if (expected_len
!= len
) {
2421 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2423 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2424 MGMT_STATUS_INVALID_PARAMS
);
2427 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2428 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2429 MGMT_STATUS_INVALID_PARAMS
);
2431 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2434 for (i
= 0; i
< key_count
; i
++) {
2435 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2437 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2438 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2439 MGMT_STATUS_INVALID_PARAMS
);
2444 hci_link_keys_clear(hdev
);
2447 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
2450 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
2454 new_settings(hdev
, NULL
);
2456 for (i
= 0; i
< key_count
; i
++) {
2457 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2459 /* Always ignore debug keys and require a new pairing if
2460 * the user wants to use them.
2462 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2465 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2466 key
->type
, key
->pin_len
, NULL
);
2469 cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2471 hci_dev_unlock(hdev
);
/* Emit the MGMT_EV_DEVICE_UNPAIRED event for @bdaddr/@addr_type.
 * @skip_sk is presumably excluded from the broadcast (it already got
 * a direct command reply) — the final mgmt_event() argument carrying
 * it was dropped by the extraction; confirm against upstream.
 * Returns the mgmt_event() result.
 */
2476 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2477 u8 addr_type
, struct sock
*skip_sk
)
2479 struct mgmt_ev_device_unpaired ev
;
2481 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2482 ev
.addr
.type
= addr_type
;
2484 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2488 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2491 struct mgmt_cp_unpair_device
*cp
= data
;
2492 struct mgmt_rp_unpair_device rp
;
2493 struct hci_cp_disconnect dc
;
2494 struct pending_cmd
*cmd
;
2495 struct hci_conn
*conn
;
2498 memset(&rp
, 0, sizeof(rp
));
2499 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2500 rp
.addr
.type
= cp
->addr
.type
;
2502 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2503 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2504 MGMT_STATUS_INVALID_PARAMS
,
2507 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2508 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2509 MGMT_STATUS_INVALID_PARAMS
,
2514 if (!hdev_is_powered(hdev
)) {
2515 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2516 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2520 if (cp
->addr
.type
== BDADDR_BREDR
) {
2521 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2525 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2526 addr_type
= ADDR_LE_DEV_PUBLIC
;
2528 addr_type
= ADDR_LE_DEV_RANDOM
;
2530 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2532 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2534 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2538 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2539 MGMT_STATUS_NOT_PAIRED
, &rp
, sizeof(rp
));
2543 if (cp
->disconnect
) {
2544 if (cp
->addr
.type
== BDADDR_BREDR
)
2545 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2548 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
2555 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2557 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2561 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2568 dc
.handle
= cpu_to_le16(conn
->handle
);
2569 dc
.reason
= 0x13; /* Remote User Terminated Connection */
2570 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2572 mgmt_pending_remove(cmd
);
2575 hci_dev_unlock(hdev
);
2579 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2582 struct mgmt_cp_disconnect
*cp
= data
;
2583 struct mgmt_rp_disconnect rp
;
2584 struct hci_cp_disconnect dc
;
2585 struct pending_cmd
*cmd
;
2586 struct hci_conn
*conn
;
2591 memset(&rp
, 0, sizeof(rp
));
2592 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2593 rp
.addr
.type
= cp
->addr
.type
;
2595 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2596 return cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2597 MGMT_STATUS_INVALID_PARAMS
,
2602 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2603 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2604 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2608 if (mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2609 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2610 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2614 if (cp
->addr
.type
== BDADDR_BREDR
)
2615 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2618 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
2620 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2621 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2622 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
2626 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2632 dc
.handle
= cpu_to_le16(conn
->handle
);
2633 dc
.reason
= HCI_ERROR_REMOTE_USER_TERM
;
2635 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2637 mgmt_pending_remove(cmd
);
2640 hci_dev_unlock(hdev
);
/* Map an HCI link type plus LE address type to the mgmt BDADDR_*
 * address type: LE links yield BDADDR_LE_PUBLIC for public addresses
 * and fall back to BDADDR_LE_RANDOM otherwise; everything else is
 * reported as BDADDR_BREDR.
 * NOTE(review): the "case LE_LINK:" and "default:" labels were
 * dropped by the extraction (numbering jumps 2646→2648, 2650→2653).
 */
2644 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2646 switch (link_type
) {
2648 switch (addr_type
) {
2649 case ADDR_LE_DEV_PUBLIC
:
2650 return BDADDR_LE_PUBLIC
;
2653 /* Fallback to LE Random address type */
2654 return BDADDR_LE_RANDOM
;
2658 /* Fallback to BR/EDR type */
2659 return BDADDR_BREDR
;
2663 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2666 struct mgmt_rp_get_connections
*rp
;
2676 if (!hdev_is_powered(hdev
)) {
2677 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2678 MGMT_STATUS_NOT_POWERED
);
2683 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2684 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2688 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2689 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2696 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2697 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2699 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2700 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2701 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2706 rp
->conn_count
= cpu_to_le16(i
);
2708 /* Recalculate length in case of filtered SCO connections, etc */
2709 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2711 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2717 hci_dev_unlock(hdev
);
/* Register a pending MGMT_OP_PIN_CODE_NEG_REPLY command and send the
 * HCI PIN Code Negative Reply for the address in @cp. On HCI send
 * failure the pending command is removed again. Returns 0 or a
 * negative errno.
 * NOTE(review): the allocation-failure check on cmd and the final
 * return were dropped by the extraction; confirm against upstream.
 */
2721 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2722 struct mgmt_cp_pin_code_neg_reply
*cp
)
2724 struct pending_cmd
*cmd
;
2727 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
/* Only the bdaddr is carried in the HCI negative reply. */
2732 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2733 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2735 mgmt_pending_remove(cmd
);
2740 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2743 struct hci_conn
*conn
;
2744 struct mgmt_cp_pin_code_reply
*cp
= data
;
2745 struct hci_cp_pin_code_reply reply
;
2746 struct pending_cmd
*cmd
;
2753 if (!hdev_is_powered(hdev
)) {
2754 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2755 MGMT_STATUS_NOT_POWERED
);
2759 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2761 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2762 MGMT_STATUS_NOT_CONNECTED
);
2766 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2767 struct mgmt_cp_pin_code_neg_reply ncp
;
2769 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2771 BT_ERR("PIN code is not 16 bytes long");
2773 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2775 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2776 MGMT_STATUS_INVALID_PARAMS
);
2781 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2787 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2788 reply
.pin_len
= cp
->pin_len
;
2789 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2791 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2793 mgmt_pending_remove(cmd
);
2796 hci_dev_unlock(hdev
);
/* MGMT_OP_SET_IO_CAPABILITY handler: validates that the requested IO
 * capability does not exceed SMP_IO_KEYBOARD_DISPLAY, stores it in
 * hdev->io_capability under the device lock, and replies with an
 * empty Command Complete. Purely host-side — no HCI traffic.
 * NOTE(review): the hci_dev_lock() line and the trailing arguments of
 * the final cmd_complete() were dropped by the extraction.
 */
2800 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2803 struct mgmt_cp_set_io_capability
*cp
= data
;
2807 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
2808 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
2809 MGMT_STATUS_INVALID_PARAMS
, NULL
, 0);
2813 hdev
->io_capability
= cp
->io_capability
;
2815 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
2816 hdev
->io_capability
);
2818 hci_dev_unlock(hdev
);
2820 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0, NULL
,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, or NULL when there is none. Skips every pending command
 * with a different opcode or a different connection.
 * NOTE(review): the "continue" statements and the return lines were
 * dropped by the extraction; confirm against upstream.
 */
2824 static struct pending_cmd
*find_pairing(struct hci_conn
*conn
)
2826 struct hci_dev
*hdev
= conn
->hdev
;
2827 struct pending_cmd
*cmd
;
2829 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2830 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
/* Each pairing cmd stashes its hci_conn in user_data (see
 * pair_device), which is how the match is made here.
 */
2833 if (cmd
->user_data
!= conn
)
/* Finish a Pair Device command: send the Command Complete carrying
 * the peer address (converted to a mgmt address type) and @status,
 * detach all connection callbacks so this connection generates no
 * further pairing events, drop the connection reference taken when
 * pairing started, and remove the pending command.
 */
2842 static void pairing_complete(struct pending_cmd
*cmd
, u8 status
)
2844 struct mgmt_rp_pair_device rp
;
2845 struct hci_conn
*conn
= cmd
->user_data
;
2847 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2848 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
2850 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
, status
,
2853 /* So we don't get further callbacks for this connection */
2854 conn
->connect_cfm_cb
= NULL
;
2855 conn
->security_cfm_cb
= NULL
;
2856 conn
->disconn_cfm_cb
= NULL
;
/* Balances the reference held for the duration of the pairing. */
2858 hci_conn_drop(conn
);
2860 mgmt_pending_remove(cmd
);
/* Entry point for the SMP layer (non-static): report the outcome of
 * an SMP pairing on @conn. Maps @complete to MGMT_STATUS_SUCCESS or
 * MGMT_STATUS_FAILED and, if a Pair Device command is pending for
 * this connection, completes it.
 * NOTE(review): the NULL check on cmd between find_pairing() and
 * pairing_complete() was dropped by the extraction.
 */
2863 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
2865 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
2866 struct pending_cmd
*cmd
;
2868 cmd
= find_pairing(conn
);
2870 pairing_complete(cmd
, status
);
/* BR/EDR connection callback (installed on connect/security/disconn
 * by pair_device): finds the pending Pair Device command for @conn
 * and completes it with the HCI status mapped to a mgmt status; logs
 * when no pending command exists.
 * NOTE(review): the if/else lines around the cmd lookup were dropped
 * by the extraction.
 */
2873 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2875 struct pending_cmd
*cmd
;
2877 BT_DBG("status %u", status
);
2879 cmd
= find_pairing(conn
);
2881 BT_DBG("Unable to find a pending command");
2883 pairing_complete(cmd
, mgmt_status(status
));
/* LE counterpart of pairing_complete_cb (installed by pair_device for
 * LE connections): completes the pending Pair Device command with the
 * mapped status. The numbering gap (2890→2895) suggests an extra
 * early-exit check on @status exists upstream but was dropped by the
 * extraction — confirm before relying on this path.
 */
2886 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2888 struct pending_cmd
*cmd
;
2890 BT_DBG("status %u", status
);
2895 cmd
= find_pairing(conn
);
2897 BT_DBG("Unable to find a pending command");
2899 pairing_complete(cmd
, mgmt_status(status
));
2902 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2905 struct mgmt_cp_pair_device
*cp
= data
;
2906 struct mgmt_rp_pair_device rp
;
2907 struct pending_cmd
*cmd
;
2908 u8 sec_level
, auth_type
;
2909 struct hci_conn
*conn
;
2914 memset(&rp
, 0, sizeof(rp
));
2915 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2916 rp
.addr
.type
= cp
->addr
.type
;
2918 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2919 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2920 MGMT_STATUS_INVALID_PARAMS
,
2923 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
2924 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2925 MGMT_STATUS_INVALID_PARAMS
,
2930 if (!hdev_is_powered(hdev
)) {
2931 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2932 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2936 sec_level
= BT_SECURITY_MEDIUM
;
2937 auth_type
= HCI_AT_DEDICATED_BONDING
;
2939 if (cp
->addr
.type
== BDADDR_BREDR
) {
2940 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
2945 /* Convert from L2CAP channel address type to HCI address type
2947 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2948 addr_type
= ADDR_LE_DEV_PUBLIC
;
2950 addr_type
= ADDR_LE_DEV_RANDOM
;
2952 /* When pairing a new device, it is expected to remember
2953 * this device for future connections. Adding the connection
2954 * parameter information ahead of time allows tracking
2955 * of the slave preferred values and will speed up any
2956 * further connection establishment.
2958 * If connection parameters already exist, then they
2959 * will be kept and this function does nothing.
2961 hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2963 conn
= hci_connect_le(hdev
, &cp
->addr
.bdaddr
, addr_type
,
2964 sec_level
, auth_type
);
2970 if (PTR_ERR(conn
) == -EBUSY
)
2971 status
= MGMT_STATUS_BUSY
;
2973 status
= MGMT_STATUS_CONNECT_FAILED
;
2975 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2981 if (conn
->connect_cfm_cb
) {
2982 hci_conn_drop(conn
);
2983 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2984 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2988 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
2991 hci_conn_drop(conn
);
2995 /* For LE, just connecting isn't a proof that the pairing finished */
2996 if (cp
->addr
.type
== BDADDR_BREDR
) {
2997 conn
->connect_cfm_cb
= pairing_complete_cb
;
2998 conn
->security_cfm_cb
= pairing_complete_cb
;
2999 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3001 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3002 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3003 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3006 conn
->io_capability
= cp
->io_cap
;
3007 cmd
->user_data
= conn
;
3009 if (conn
->state
== BT_CONNECTED
&&
3010 hci_conn_security(conn
, sec_level
, auth_type
))
3011 pairing_complete(cmd
, 0);
3016 hci_dev_unlock(hdev
);
3020 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3023 struct mgmt_addr_info
*addr
= data
;
3024 struct pending_cmd
*cmd
;
3025 struct hci_conn
*conn
;
3032 if (!hdev_is_powered(hdev
)) {
3033 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3034 MGMT_STATUS_NOT_POWERED
);
3038 cmd
= mgmt_pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3040 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3041 MGMT_STATUS_INVALID_PARAMS
);
3045 conn
= cmd
->user_data
;
3047 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3048 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3049 MGMT_STATUS_INVALID_PARAMS
);
3053 pairing_complete(cmd
, MGMT_STATUS_CANCELLED
);
3055 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3056 addr
, sizeof(*addr
));
3058 hci_dev_unlock(hdev
);
3062 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3063 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3064 u16 hci_op
, __le32 passkey
)
3066 struct pending_cmd
*cmd
;
3067 struct hci_conn
*conn
;
3072 if (!hdev_is_powered(hdev
)) {
3073 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3074 MGMT_STATUS_NOT_POWERED
, addr
,
3079 if (addr
->type
== BDADDR_BREDR
)
3080 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3082 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
3085 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3086 MGMT_STATUS_NOT_CONNECTED
, addr
,
3091 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3092 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3094 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3095 MGMT_STATUS_SUCCESS
, addr
,
3098 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3099 MGMT_STATUS_FAILED
, addr
,
3105 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3111 /* Continue with pairing via HCI */
3112 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3113 struct hci_cp_user_passkey_reply cp
;
3115 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3116 cp
.passkey
= passkey
;
3117 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3119 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3123 mgmt_pending_remove(cmd
);
3126 hci_dev_unlock(hdev
);
3130 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3131 void *data
, u16 len
)
3133 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3137 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3138 MGMT_OP_PIN_CODE_NEG_REPLY
,
3139 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3142 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3145 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3149 if (len
!= sizeof(*cp
))
3150 return cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3151 MGMT_STATUS_INVALID_PARAMS
);
3153 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3154 MGMT_OP_USER_CONFIRM_REPLY
,
3155 HCI_OP_USER_CONFIRM_REPLY
, 0);
3158 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3159 void *data
, u16 len
)
3161 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3165 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3166 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3167 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3170 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3173 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3177 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3178 MGMT_OP_USER_PASSKEY_REPLY
,
3179 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3182 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3183 void *data
, u16 len
)
3185 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3189 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3190 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3191 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3194 static void update_name(struct hci_request
*req
)
3196 struct hci_dev
*hdev
= req
->hdev
;
3197 struct hci_cp_write_local_name cp
;
3199 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3201 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3204 static void set_name_complete(struct hci_dev
*hdev
, u8 status
)
3206 struct mgmt_cp_set_local_name
*cp
;
3207 struct pending_cmd
*cmd
;
3209 BT_DBG("status 0x%02x", status
);
3213 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3220 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3221 mgmt_status(status
));
3223 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3226 mgmt_pending_remove(cmd
);
3229 hci_dev_unlock(hdev
);
3232 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3235 struct mgmt_cp_set_local_name
*cp
= data
;
3236 struct pending_cmd
*cmd
;
3237 struct hci_request req
;
3244 /* If the old values are the same as the new ones just return a
3245 * direct command complete event.
3247 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3248 !memcmp(hdev
->short_name
, cp
->short_name
,
3249 sizeof(hdev
->short_name
))) {
3250 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3255 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3257 if (!hdev_is_powered(hdev
)) {
3258 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3260 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3265 err
= mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
, len
,
3271 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3277 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3279 hci_req_init(&req
, hdev
);
3281 if (lmp_bredr_capable(hdev
)) {
3286 /* The name is stored in the scan response data and so
3287 * no need to udpate the advertising data here.
3289 if (lmp_le_capable(hdev
))
3290 update_scan_rsp_data(&req
);
3292 err
= hci_req_run(&req
, set_name_complete
);
3294 mgmt_pending_remove(cmd
);
3297 hci_dev_unlock(hdev
);
3301 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3302 void *data
, u16 data_len
)
3304 struct pending_cmd
*cmd
;
3307 BT_DBG("%s", hdev
->name
);
3311 if (!hdev_is_powered(hdev
)) {
3312 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3313 MGMT_STATUS_NOT_POWERED
);
3317 if (!lmp_ssp_capable(hdev
)) {
3318 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3319 MGMT_STATUS_NOT_SUPPORTED
);
3323 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3324 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3329 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3335 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
3336 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
,
3339 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3342 mgmt_pending_remove(cmd
);
3345 hci_dev_unlock(hdev
);
3349 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3350 void *data
, u16 len
)
3354 BT_DBG("%s ", hdev
->name
);
3358 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3359 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3362 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3363 cp
->hash
, cp
->randomizer
);
3365 status
= MGMT_STATUS_FAILED
;
3367 status
= MGMT_STATUS_SUCCESS
;
3369 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3370 status
, &cp
->addr
, sizeof(cp
->addr
));
3371 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3372 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3375 err
= hci_add_remote_oob_ext_data(hdev
, &cp
->addr
.bdaddr
,
3381 status
= MGMT_STATUS_FAILED
;
3383 status
= MGMT_STATUS_SUCCESS
;
3385 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3386 status
, &cp
->addr
, sizeof(cp
->addr
));
3388 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3389 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3390 MGMT_STATUS_INVALID_PARAMS
);
3393 hci_dev_unlock(hdev
);
3397 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3398 void *data
, u16 len
)
3400 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3404 BT_DBG("%s", hdev
->name
);
3408 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
);
3410 status
= MGMT_STATUS_INVALID_PARAMS
;
3412 status
= MGMT_STATUS_SUCCESS
;
3414 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3415 status
, &cp
->addr
, sizeof(cp
->addr
));
3417 hci_dev_unlock(hdev
);
3421 static int mgmt_start_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3423 struct pending_cmd
*cmd
;
3427 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3429 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3433 type
= hdev
->discovery
.type
;
3435 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3436 &type
, sizeof(type
));
3437 mgmt_pending_remove(cmd
);
3442 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3444 unsigned long timeout
= 0;
3446 BT_DBG("status %d", status
);
3450 mgmt_start_discovery_failed(hdev
, status
);
3451 hci_dev_unlock(hdev
);
3456 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
3457 hci_dev_unlock(hdev
);
3459 switch (hdev
->discovery
.type
) {
3460 case DISCOV_TYPE_LE
:
3461 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
3464 case DISCOV_TYPE_INTERLEAVED
:
3465 timeout
= msecs_to_jiffies(hdev
->discov_interleaved_timeout
);
3468 case DISCOV_TYPE_BREDR
:
3472 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
3478 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
, timeout
);
3481 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3482 void *data
, u16 len
)
3484 struct mgmt_cp_start_discovery
*cp
= data
;
3485 struct pending_cmd
*cmd
;
3486 struct hci_cp_le_set_scan_param param_cp
;
3487 struct hci_cp_le_set_scan_enable enable_cp
;
3488 struct hci_cp_inquiry inq_cp
;
3489 struct hci_request req
;
3490 /* General inquiry access code (GIAC) */
3491 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3492 u8 status
, own_addr_type
;
3495 BT_DBG("%s", hdev
->name
);
3499 if (!hdev_is_powered(hdev
)) {
3500 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3501 MGMT_STATUS_NOT_POWERED
);
3505 if (test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3506 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3511 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
) {
3512 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3517 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, NULL
, 0);
3523 hdev
->discovery
.type
= cp
->type
;
3525 hci_req_init(&req
, hdev
);
3527 switch (hdev
->discovery
.type
) {
3528 case DISCOV_TYPE_BREDR
:
3529 status
= mgmt_bredr_support(hdev
);
3531 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3533 mgmt_pending_remove(cmd
);
3537 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3538 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3540 mgmt_pending_remove(cmd
);
3544 hci_inquiry_cache_flush(hdev
);
3546 memset(&inq_cp
, 0, sizeof(inq_cp
));
3547 memcpy(&inq_cp
.lap
, lap
, sizeof(inq_cp
.lap
));
3548 inq_cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
3549 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(inq_cp
), &inq_cp
);
3552 case DISCOV_TYPE_LE
:
3553 case DISCOV_TYPE_INTERLEAVED
:
3554 status
= mgmt_le_support(hdev
);
3556 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3558 mgmt_pending_remove(cmd
);
3562 if (hdev
->discovery
.type
== DISCOV_TYPE_INTERLEAVED
&&
3563 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
3564 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3565 MGMT_STATUS_NOT_SUPPORTED
);
3566 mgmt_pending_remove(cmd
);
3570 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3571 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3572 MGMT_STATUS_REJECTED
);
3573 mgmt_pending_remove(cmd
);
3577 /* If controller is scanning, it means the background scanning
3578 * is running. Thus, we should temporarily stop it in order to
3579 * set the discovery scanning parameters.
3581 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
3582 hci_req_add_le_scan_disable(&req
);
3584 memset(¶m_cp
, 0, sizeof(param_cp
));
3586 /* All active scans will be done with either a resolvable
3587 * private address (when privacy feature has been enabled)
3588 * or unresolvable private address.
3590 err
= hci_update_random_address(&req
, true, &own_addr_type
);
3592 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3593 MGMT_STATUS_FAILED
);
3594 mgmt_pending_remove(cmd
);
3598 param_cp
.type
= LE_SCAN_ACTIVE
;
3599 param_cp
.interval
= cpu_to_le16(DISCOV_LE_SCAN_INT
);
3600 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
3601 param_cp
.own_address_type
= own_addr_type
;
3602 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
3605 memset(&enable_cp
, 0, sizeof(enable_cp
));
3606 enable_cp
.enable
= LE_SCAN_ENABLE
;
3607 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3608 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
3613 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3614 MGMT_STATUS_INVALID_PARAMS
);
3615 mgmt_pending_remove(cmd
);
3619 err
= hci_req_run(&req
, start_discovery_complete
);
3621 mgmt_pending_remove(cmd
);
3623 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3626 hci_dev_unlock(hdev
);
3630 static int mgmt_stop_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3632 struct pending_cmd
*cmd
;
3635 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
3639 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3640 &hdev
->discovery
.type
, sizeof(hdev
->discovery
.type
));
3641 mgmt_pending_remove(cmd
);
3646 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3648 BT_DBG("status %d", status
);
3653 mgmt_stop_discovery_failed(hdev
, status
);
3657 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3660 hci_dev_unlock(hdev
);
3663 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3666 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
3667 struct pending_cmd
*cmd
;
3668 struct hci_request req
;
3671 BT_DBG("%s", hdev
->name
);
3675 if (!hci_discovery_active(hdev
)) {
3676 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3677 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
3678 sizeof(mgmt_cp
->type
));
3682 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
3683 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3684 MGMT_STATUS_INVALID_PARAMS
, &mgmt_cp
->type
,
3685 sizeof(mgmt_cp
->type
));
3689 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, NULL
, 0);
3695 hci_req_init(&req
, hdev
);
3697 hci_stop_discovery(&req
);
3699 err
= hci_req_run(&req
, stop_discovery_complete
);
3701 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
3705 mgmt_pending_remove(cmd
);
3707 /* If no HCI commands were sent we're done */
3708 if (err
== -ENODATA
) {
3709 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
3710 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
3711 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3715 hci_dev_unlock(hdev
);
3719 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3722 struct mgmt_cp_confirm_name
*cp
= data
;
3723 struct inquiry_entry
*e
;
3726 BT_DBG("%s", hdev
->name
);
3730 if (!hci_discovery_active(hdev
)) {
3731 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3732 MGMT_STATUS_FAILED
, &cp
->addr
,
3737 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
3739 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3740 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
3745 if (cp
->name_known
) {
3746 e
->name_state
= NAME_KNOWN
;
3749 e
->name_state
= NAME_NEEDED
;
3750 hci_inquiry_cache_update_resolve(hdev
, e
);
3753 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0, &cp
->addr
,
3757 hci_dev_unlock(hdev
);
3761 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3764 struct mgmt_cp_block_device
*cp
= data
;
3768 BT_DBG("%s", hdev
->name
);
3770 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3771 return cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
3772 MGMT_STATUS_INVALID_PARAMS
,
3773 &cp
->addr
, sizeof(cp
->addr
));
3777 err
= hci_blacklist_add(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3779 status
= MGMT_STATUS_FAILED
;
3783 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3785 status
= MGMT_STATUS_SUCCESS
;
3788 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
3789 &cp
->addr
, sizeof(cp
->addr
));
3791 hci_dev_unlock(hdev
);
3796 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3799 struct mgmt_cp_unblock_device
*cp
= data
;
3803 BT_DBG("%s", hdev
->name
);
3805 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3806 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
3807 MGMT_STATUS_INVALID_PARAMS
,
3808 &cp
->addr
, sizeof(cp
->addr
));
3812 err
= hci_blacklist_del(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3814 status
= MGMT_STATUS_INVALID_PARAMS
;
3818 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3820 status
= MGMT_STATUS_SUCCESS
;
3823 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
3824 &cp
->addr
, sizeof(cp
->addr
));
3826 hci_dev_unlock(hdev
);
3831 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3834 struct mgmt_cp_set_device_id
*cp
= data
;
3835 struct hci_request req
;
3839 BT_DBG("%s", hdev
->name
);
3841 source
= __le16_to_cpu(cp
->source
);
3843 if (source
> 0x0002)
3844 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
3845 MGMT_STATUS_INVALID_PARAMS
);
3849 hdev
->devid_source
= source
;
3850 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
3851 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
3852 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
3854 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0, NULL
, 0);
3856 hci_req_init(&req
, hdev
);
3858 hci_req_run(&req
, NULL
);
3860 hci_dev_unlock(hdev
);
3865 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
)
3867 struct cmd_lookup match
= { NULL
, hdev
};
3870 u8 mgmt_err
= mgmt_status(status
);
3872 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
3873 cmd_status_rsp
, &mgmt_err
);
3877 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
3880 new_settings(hdev
, match
.sk
);
3886 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3889 struct mgmt_mode
*cp
= data
;
3890 struct pending_cmd
*cmd
;
3891 struct hci_request req
;
3892 u8 val
, enabled
, status
;
3895 BT_DBG("request for %s", hdev
->name
);
3897 status
= mgmt_le_support(hdev
);
3899 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3902 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
3903 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3904 MGMT_STATUS_INVALID_PARAMS
);
3909 enabled
= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3911 /* The following conditions are ones which mean that we should
3912 * not do any HCI communication but directly send a mgmt
3913 * response to user space (after toggling the flag if
3916 if (!hdev_is_powered(hdev
) || val
== enabled
||
3917 hci_conn_num(hdev
, LE_LINK
) > 0) {
3918 bool changed
= false;
3920 if (val
!= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3921 change_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
3925 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
3930 err
= new_settings(hdev
, sk
);
3935 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
3936 mgmt_pending_find(MGMT_OP_SET_LE
, hdev
)) {
3937 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3942 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
3948 hci_req_init(&req
, hdev
);
3951 enable_advertising(&req
);
3953 disable_advertising(&req
);
3955 err
= hci_req_run(&req
, set_advertising_complete
);
3957 mgmt_pending_remove(cmd
);
3960 hci_dev_unlock(hdev
);
3964 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
3965 void *data
, u16 len
)
3967 struct mgmt_cp_set_static_address
*cp
= data
;
3970 BT_DBG("%s", hdev
->name
);
3972 if (!lmp_le_capable(hdev
))
3973 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3974 MGMT_STATUS_NOT_SUPPORTED
);
3976 if (hdev_is_powered(hdev
))
3977 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
3978 MGMT_STATUS_REJECTED
);
3980 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
3981 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
3982 return cmd_status(sk
, hdev
->id
,
3983 MGMT_OP_SET_STATIC_ADDRESS
,
3984 MGMT_STATUS_INVALID_PARAMS
);
3986 /* Two most significant bits shall be set */
3987 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
3988 return cmd_status(sk
, hdev
->id
,
3989 MGMT_OP_SET_STATIC_ADDRESS
,
3990 MGMT_STATUS_INVALID_PARAMS
);
3995 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
3997 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
, 0, NULL
, 0);
3999 hci_dev_unlock(hdev
);
4004 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4005 void *data
, u16 len
)
4007 struct mgmt_cp_set_scan_params
*cp
= data
;
4008 __u16 interval
, window
;
4011 BT_DBG("%s", hdev
->name
);
4013 if (!lmp_le_capable(hdev
))
4014 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4015 MGMT_STATUS_NOT_SUPPORTED
);
4017 interval
= __le16_to_cpu(cp
->interval
);
4019 if (interval
< 0x0004 || interval
> 0x4000)
4020 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4021 MGMT_STATUS_INVALID_PARAMS
);
4023 window
= __le16_to_cpu(cp
->window
);
4025 if (window
< 0x0004 || window
> 0x4000)
4026 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4027 MGMT_STATUS_INVALID_PARAMS
);
4029 if (window
> interval
)
4030 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4031 MGMT_STATUS_INVALID_PARAMS
);
4035 hdev
->le_scan_interval
= interval
;
4036 hdev
->le_scan_window
= window
;
4038 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0, NULL
, 0);
4040 /* If background scan is running, restart it so new parameters are
4043 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
) &&
4044 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4045 struct hci_request req
;
4047 hci_req_init(&req
, hdev
);
4049 hci_req_add_le_scan_disable(&req
);
4050 hci_req_add_le_passive_scan(&req
);
4052 hci_req_run(&req
, NULL
);
4055 hci_dev_unlock(hdev
);
4060 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
)
4062 struct pending_cmd
*cmd
;
4064 BT_DBG("status 0x%02x", status
);
4068 cmd
= mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4073 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4074 mgmt_status(status
));
4076 struct mgmt_mode
*cp
= cmd
->param
;
4079 set_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4081 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4083 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4084 new_settings(hdev
, cmd
->sk
);
4087 mgmt_pending_remove(cmd
);
4090 hci_dev_unlock(hdev
);
4093 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4094 void *data
, u16 len
)
4096 struct mgmt_mode
*cp
= data
;
4097 struct pending_cmd
*cmd
;
4098 struct hci_request req
;
4101 BT_DBG("%s", hdev
->name
);
4103 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
) ||
4104 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4105 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4106 MGMT_STATUS_NOT_SUPPORTED
);
4108 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4109 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4110 MGMT_STATUS_INVALID_PARAMS
);
4112 if (!hdev_is_powered(hdev
))
4113 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4114 MGMT_STATUS_NOT_POWERED
);
4116 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4117 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4118 MGMT_STATUS_REJECTED
);
4122 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4123 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4128 if (!!cp
->val
== test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
)) {
4129 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4134 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4141 hci_req_init(&req
, hdev
);
4143 write_fast_connectable(&req
, cp
->val
);
4145 err
= hci_req_run(&req
, fast_connectable_complete
);
4147 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4148 MGMT_STATUS_FAILED
);
4149 mgmt_pending_remove(cmd
);
4153 hci_dev_unlock(hdev
);
4158 static void set_bredr_scan(struct hci_request
*req
)
4160 struct hci_dev
*hdev
= req
->hdev
;
4163 /* Ensure that fast connectable is disabled. This function will
4164 * not do anything if the page scan parameters are already what
4167 write_fast_connectable(req
, false);
4169 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4171 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
4172 scan
|= SCAN_INQUIRY
;
4175 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
4178 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
)
4180 struct pending_cmd
*cmd
;
4182 BT_DBG("status 0x%02x", status
);
4186 cmd
= mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
);
4191 u8 mgmt_err
= mgmt_status(status
);
4193 /* We need to restore the flag if related HCI commands
4196 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4198 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4200 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4201 new_settings(hdev
, cmd
->sk
);
4204 mgmt_pending_remove(cmd
);
4207 hci_dev_unlock(hdev
);
4210 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4212 struct mgmt_mode
*cp
= data
;
4213 struct pending_cmd
*cmd
;
4214 struct hci_request req
;
4217 BT_DBG("request for %s", hdev
->name
);
4219 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4220 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4221 MGMT_STATUS_NOT_SUPPORTED
);
4223 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
4224 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4225 MGMT_STATUS_REJECTED
);
4227 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4228 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4229 MGMT_STATUS_INVALID_PARAMS
);
4233 if (cp
->val
== test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4234 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4238 if (!hdev_is_powered(hdev
)) {
4240 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4241 clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
4242 clear_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4243 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4244 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
4247 change_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4249 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4253 err
= new_settings(hdev
, sk
);
4257 /* Reject disabling when powered on */
4259 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4260 MGMT_STATUS_REJECTED
);
4264 if (mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4265 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4270 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4276 /* We need to flip the bit already here so that update_adv_data
4277 * generates the correct flags.
4279 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4281 hci_req_init(&req
, hdev
);
4283 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4284 set_bredr_scan(&req
);
4286 /* Since only the advertising data flags will change, there
4287 * is no need to update the scan response data.
4289 update_adv_data(&req
);
4291 err
= hci_req_run(&req
, set_bredr_complete
);
4293 mgmt_pending_remove(cmd
);
4296 hci_dev_unlock(hdev
);
4300 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4301 void *data
, u16 len
)
4303 struct mgmt_mode
*cp
= data
;
4304 struct pending_cmd
*cmd
;
4308 BT_DBG("request for %s", hdev
->name
);
4310 status
= mgmt_bredr_support(hdev
);
4312 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4315 if (!lmp_sc_capable(hdev
) &&
4316 !test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
4317 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4318 MGMT_STATUS_NOT_SUPPORTED
);
4320 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4321 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4322 MGMT_STATUS_INVALID_PARAMS
);
4326 if (!hdev_is_powered(hdev
)) {
4330 changed
= !test_and_set_bit(HCI_SC_ENABLED
,
4332 if (cp
->val
== 0x02)
4333 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4335 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4337 changed
= test_and_clear_bit(HCI_SC_ENABLED
,
4339 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4342 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4347 err
= new_settings(hdev
, sk
);
4352 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4353 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4360 if (val
== test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
4361 (cp
->val
== 0x02) == test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
)) {
4362 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4366 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4372 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4374 mgmt_pending_remove(cmd
);
4378 if (cp
->val
== 0x02)
4379 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4381 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4384 hci_dev_unlock(hdev
);
4388 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4389 void *data
, u16 len
)
4391 struct mgmt_mode
*cp
= data
;
4392 bool changed
, use_changed
;
4395 BT_DBG("request for %s", hdev
->name
);
4397 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4398 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
4399 MGMT_STATUS_INVALID_PARAMS
);
4404 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
4407 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
4410 if (cp
->val
== 0x02)
4411 use_changed
= !test_and_set_bit(HCI_USE_DEBUG_KEYS
,
4414 use_changed
= test_and_clear_bit(HCI_USE_DEBUG_KEYS
,
4417 if (hdev_is_powered(hdev
) && use_changed
&&
4418 test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
4419 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
4420 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
4421 sizeof(mode
), &mode
);
4424 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
4429 err
= new_settings(hdev
, sk
);
4432 hci_dev_unlock(hdev
);
4436 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4439 struct mgmt_cp_set_privacy
*cp
= cp_data
;
4443 BT_DBG("request for %s", hdev
->name
);
4445 if (!lmp_le_capable(hdev
))
4446 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4447 MGMT_STATUS_NOT_SUPPORTED
);
4449 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
4450 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4451 MGMT_STATUS_INVALID_PARAMS
);
4453 if (hdev_is_powered(hdev
))
4454 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4455 MGMT_STATUS_REJECTED
);
4459 /* If user space supports this command it is also expected to
4460 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4462 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4465 changed
= !test_and_set_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4466 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
4467 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4469 changed
= test_and_clear_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4470 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
4471 clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4474 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
4479 err
= new_settings(hdev
, sk
);
4482 hci_dev_unlock(hdev
);
4486 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
4488 switch (irk
->addr
.type
) {
4489 case BDADDR_LE_PUBLIC
:
4492 case BDADDR_LE_RANDOM
:
4493 /* Two most significant bits shall be set */
4494 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4502 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4505 struct mgmt_cp_load_irks
*cp
= cp_data
;
4506 u16 irk_count
, expected_len
;
4509 BT_DBG("request for %s", hdev
->name
);
4511 if (!lmp_le_capable(hdev
))
4512 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4513 MGMT_STATUS_NOT_SUPPORTED
);
4515 irk_count
= __le16_to_cpu(cp
->irk_count
);
4517 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4518 if (expected_len
!= len
) {
4519 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4521 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4522 MGMT_STATUS_INVALID_PARAMS
);
4525 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4527 for (i
= 0; i
< irk_count
; i
++) {
4528 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4530 if (!irk_is_valid(key
))
4531 return cmd_status(sk
, hdev
->id
,
4533 MGMT_STATUS_INVALID_PARAMS
);
4538 hci_smp_irks_clear(hdev
);
4540 for (i
= 0; i
< irk_count
; i
++) {
4541 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4544 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
4545 addr_type
= ADDR_LE_DEV_PUBLIC
;
4547 addr_type
= ADDR_LE_DEV_RANDOM
;
4549 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
4553 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4555 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4557 hci_dev_unlock(hdev
);
4562 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
4564 if (key
->master
!= 0x00 && key
->master
!= 0x01)
4567 switch (key
->addr
.type
) {
4568 case BDADDR_LE_PUBLIC
:
4571 case BDADDR_LE_RANDOM
:
4572 /* Two most significant bits shall be set */
4573 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4581 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4582 void *cp_data
, u16 len
)
4584 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
4585 u16 key_count
, expected_len
;
4588 BT_DBG("request for %s", hdev
->name
);
4590 if (!lmp_le_capable(hdev
))
4591 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4592 MGMT_STATUS_NOT_SUPPORTED
);
4594 key_count
= __le16_to_cpu(cp
->key_count
);
4596 expected_len
= sizeof(*cp
) + key_count
*
4597 sizeof(struct mgmt_ltk_info
);
4598 if (expected_len
!= len
) {
4599 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4601 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4602 MGMT_STATUS_INVALID_PARAMS
);
4605 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
4607 for (i
= 0; i
< key_count
; i
++) {
4608 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4610 if (!ltk_is_valid(key
))
4611 return cmd_status(sk
, hdev
->id
,
4612 MGMT_OP_LOAD_LONG_TERM_KEYS
,
4613 MGMT_STATUS_INVALID_PARAMS
);
4618 hci_smp_ltks_clear(hdev
);
4620 for (i
= 0; i
< key_count
; i
++) {
4621 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4622 u8 type
, addr_type
, authenticated
;
4624 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
4625 addr_type
= ADDR_LE_DEV_PUBLIC
;
4627 addr_type
= ADDR_LE_DEV_RANDOM
;
4632 type
= SMP_LTK_SLAVE
;
4634 switch (key
->type
) {
4635 case MGMT_LTK_UNAUTHENTICATED
:
4636 authenticated
= 0x00;
4638 case MGMT_LTK_AUTHENTICATED
:
4639 authenticated
= 0x01;
4645 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
4646 authenticated
, key
->val
, key
->enc_size
, key
->ediv
,
4650 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
4653 hci_dev_unlock(hdev
);
4658 struct cmd_conn_lookup
{
4659 struct hci_conn
*conn
;
4660 bool valid_tx_power
;
4664 static void get_conn_info_complete(struct pending_cmd
*cmd
, void *data
)
4666 struct cmd_conn_lookup
*match
= data
;
4667 struct mgmt_cp_get_conn_info
*cp
;
4668 struct mgmt_rp_get_conn_info rp
;
4669 struct hci_conn
*conn
= cmd
->user_data
;
4671 if (conn
!= match
->conn
)
4674 cp
= (struct mgmt_cp_get_conn_info
*) cmd
->param
;
4676 memset(&rp
, 0, sizeof(rp
));
4677 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4678 rp
.addr
.type
= cp
->addr
.type
;
4680 if (!match
->mgmt_status
) {
4681 rp
.rssi
= conn
->rssi
;
4683 if (match
->valid_tx_power
) {
4684 rp
.tx_power
= conn
->tx_power
;
4685 rp
.max_tx_power
= conn
->max_tx_power
;
4687 rp
.tx_power
= HCI_TX_POWER_INVALID
;
4688 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
4692 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
4693 match
->mgmt_status
, &rp
, sizeof(rp
));
4695 hci_conn_drop(conn
);
4697 mgmt_pending_remove(cmd
);
4700 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 status
)
4702 struct hci_cp_read_rssi
*cp
;
4703 struct hci_conn
*conn
;
4704 struct cmd_conn_lookup match
;
4707 BT_DBG("status 0x%02x", status
);
4711 /* TX power data is valid in case request completed successfully,
4712 * otherwise we assume it's not valid. At the moment we assume that
4713 * either both or none of current and max values are valid to keep code
4716 match
.valid_tx_power
= !status
;
4718 /* Commands sent in request are either Read RSSI or Read Transmit Power
4719 * Level so we check which one was last sent to retrieve connection
4720 * handle. Both commands have handle as first parameter so it's safe to
4721 * cast data on the same command struct.
4723 * First command sent is always Read RSSI and we fail only if it fails.
4724 * In other case we simply override error to indicate success as we
4725 * already remembered if TX power value is actually valid.
4727 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
4729 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
4734 BT_ERR("invalid sent_cmd in response");
4738 handle
= __le16_to_cpu(cp
->handle
);
4739 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4741 BT_ERR("unknown handle (%d) in response", handle
);
4746 match
.mgmt_status
= mgmt_status(status
);
4748 /* Cache refresh is complete, now reply for mgmt request for given
4751 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO
, hdev
,
4752 get_conn_info_complete
, &match
);
4755 hci_dev_unlock(hdev
);
4758 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4761 struct mgmt_cp_get_conn_info
*cp
= data
;
4762 struct mgmt_rp_get_conn_info rp
;
4763 struct hci_conn
*conn
;
4764 unsigned long conn_info_age
;
4767 BT_DBG("%s", hdev
->name
);
4769 memset(&rp
, 0, sizeof(rp
));
4770 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4771 rp
.addr
.type
= cp
->addr
.type
;
4773 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4774 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4775 MGMT_STATUS_INVALID_PARAMS
,
4780 if (!hdev_is_powered(hdev
)) {
4781 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4782 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
4786 if (cp
->addr
.type
== BDADDR_BREDR
)
4787 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
4790 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
4792 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
4793 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4794 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
4798 /* To avoid client trying to guess when to poll again for information we
4799 * calculate conn info age as random value between min/max set in hdev.
4801 conn_info_age
= hdev
->conn_info_min_age
+
4802 prandom_u32_max(hdev
->conn_info_max_age
-
4803 hdev
->conn_info_min_age
);
4805 /* Query controller to refresh cached values if they are too old or were
4808 if (time_after(jiffies
, conn
->conn_info_timestamp
+
4809 msecs_to_jiffies(conn_info_age
)) ||
4810 !conn
->conn_info_timestamp
) {
4811 struct hci_request req
;
4812 struct hci_cp_read_tx_power req_txp_cp
;
4813 struct hci_cp_read_rssi req_rssi_cp
;
4814 struct pending_cmd
*cmd
;
4816 hci_req_init(&req
, hdev
);
4817 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
4818 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
4821 /* For LE links TX power does not change thus we don't need to
4822 * query for it once value is known.
4824 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
4825 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
4826 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
4827 req_txp_cp
.type
= 0x00;
4828 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
4829 sizeof(req_txp_cp
), &req_txp_cp
);
4832 /* Max TX power needs to be read only once per connection */
4833 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
4834 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
4835 req_txp_cp
.type
= 0x01;
4836 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
4837 sizeof(req_txp_cp
), &req_txp_cp
);
4840 err
= hci_req_run(&req
, conn_info_refresh_complete
);
4844 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
4851 hci_conn_hold(conn
);
4852 cmd
->user_data
= conn
;
4854 conn
->conn_info_timestamp
= jiffies
;
4856 /* Cache is valid, just reply with values cached in hci_conn */
4857 rp
.rssi
= conn
->rssi
;
4858 rp
.tx_power
= conn
->tx_power
;
4859 rp
.max_tx_power
= conn
->max_tx_power
;
4861 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4862 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
4866 hci_dev_unlock(hdev
);
4870 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
)
4872 struct mgmt_cp_get_clock_info
*cp
;
4873 struct mgmt_rp_get_clock_info rp
;
4874 struct hci_cp_read_clock
*hci_cp
;
4875 struct pending_cmd
*cmd
;
4876 struct hci_conn
*conn
;
4878 BT_DBG("%s status %u", hdev
->name
, status
);
4882 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
4886 if (hci_cp
->which
) {
4887 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
4888 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4893 cmd
= mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
4899 memset(&rp
, 0, sizeof(rp
));
4900 memcpy(&rp
.addr
, &cp
->addr
, sizeof(rp
.addr
));
4905 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
4908 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
4909 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
4913 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
4915 mgmt_pending_remove(cmd
);
4917 hci_conn_drop(conn
);
4920 hci_dev_unlock(hdev
);
4923 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4926 struct mgmt_cp_get_clock_info
*cp
= data
;
4927 struct mgmt_rp_get_clock_info rp
;
4928 struct hci_cp_read_clock hci_cp
;
4929 struct pending_cmd
*cmd
;
4930 struct hci_request req
;
4931 struct hci_conn
*conn
;
4934 BT_DBG("%s", hdev
->name
);
4936 memset(&rp
, 0, sizeof(rp
));
4937 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4938 rp
.addr
.type
= cp
->addr
.type
;
4940 if (cp
->addr
.type
!= BDADDR_BREDR
)
4941 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
4942 MGMT_STATUS_INVALID_PARAMS
,
4947 if (!hdev_is_powered(hdev
)) {
4948 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
4949 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
4953 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
4954 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
4956 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
4957 err
= cmd_complete(sk
, hdev
->id
,
4958 MGMT_OP_GET_CLOCK_INFO
,
4959 MGMT_STATUS_NOT_CONNECTED
,
4967 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
4973 hci_req_init(&req
, hdev
);
4975 memset(&hci_cp
, 0, sizeof(hci_cp
));
4976 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
4979 hci_conn_hold(conn
);
4980 cmd
->user_data
= conn
;
4982 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
4983 hci_cp
.which
= 0x01; /* Piconet clock */
4984 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
4987 err
= hci_req_run(&req
, get_clock_info_complete
);
4989 mgmt_pending_remove(cmd
);
4992 hci_dev_unlock(hdev
);
4996 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
4997 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
4999 struct mgmt_ev_device_added ev
;
5001 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5002 ev
.addr
.type
= type
;
5005 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5008 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
5009 void *data
, u16 len
)
5011 struct mgmt_cp_add_device
*cp
= data
;
5012 u8 auto_conn
, addr_type
;
5015 BT_DBG("%s", hdev
->name
);
5017 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5018 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
5019 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5020 MGMT_STATUS_INVALID_PARAMS
,
5021 &cp
->addr
, sizeof(cp
->addr
));
5023 if (cp
->action
!= 0x00 && cp
->action
!= 0x01)
5024 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5025 MGMT_STATUS_INVALID_PARAMS
,
5026 &cp
->addr
, sizeof(cp
->addr
));
5030 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5031 addr_type
= ADDR_LE_DEV_PUBLIC
;
5033 addr_type
= ADDR_LE_DEV_RANDOM
;
5036 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
5038 auto_conn
= HCI_AUTO_CONN_REPORT
;
5040 /* If the connection parameters don't exist for this device,
5041 * they will be created and configured with defaults.
5043 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
5045 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5047 &cp
->addr
, sizeof(cp
->addr
));
5051 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
5053 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5054 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5057 hci_dev_unlock(hdev
);
5061 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
5062 bdaddr_t
*bdaddr
, u8 type
)
5064 struct mgmt_ev_device_removed ev
;
5066 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5067 ev
.addr
.type
= type
;
5069 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
5072 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
5073 void *data
, u16 len
)
5075 struct mgmt_cp_remove_device
*cp
= data
;
5078 BT_DBG("%s", hdev
->name
);
5082 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5083 struct hci_conn_params
*params
;
5086 if (!bdaddr_type_is_le(cp
->addr
.type
)) {
5087 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5088 MGMT_STATUS_INVALID_PARAMS
,
5089 &cp
->addr
, sizeof(cp
->addr
));
5093 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5094 addr_type
= ADDR_LE_DEV_PUBLIC
;
5096 addr_type
= ADDR_LE_DEV_RANDOM
;
5098 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
5101 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5102 MGMT_STATUS_INVALID_PARAMS
,
5103 &cp
->addr
, sizeof(cp
->addr
));
5107 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
) {
5108 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5109 MGMT_STATUS_INVALID_PARAMS
,
5110 &cp
->addr
, sizeof(cp
->addr
));
5114 hci_pend_le_conn_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
5115 list_del(¶ms
->list
);
5118 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
5120 if (cp
->addr
.type
) {
5121 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5122 MGMT_STATUS_INVALID_PARAMS
,
5123 &cp
->addr
, sizeof(cp
->addr
));
5127 hci_conn_params_clear_enabled(hdev
);
5130 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5131 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5134 hci_dev_unlock(hdev
);
5138 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5141 struct mgmt_cp_load_conn_param
*cp
= data
;
5142 u16 param_count
, expected_len
;
5145 if (!lmp_le_capable(hdev
))
5146 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5147 MGMT_STATUS_NOT_SUPPORTED
);
5149 param_count
= __le16_to_cpu(cp
->param_count
);
5151 expected_len
= sizeof(*cp
) + param_count
*
5152 sizeof(struct mgmt_conn_param
);
5153 if (expected_len
!= len
) {
5154 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5156 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5157 MGMT_STATUS_INVALID_PARAMS
);
5160 BT_DBG("%s param_count %u", hdev
->name
, param_count
);
5164 hci_conn_params_clear_disabled(hdev
);
5166 for (i
= 0; i
< param_count
; i
++) {
5167 struct mgmt_conn_param
*param
= &cp
->params
[i
];
5168 struct hci_conn_params
*hci_param
;
5169 u16 min
, max
, latency
, timeout
;
5172 BT_DBG("Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
5175 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
5176 addr_type
= ADDR_LE_DEV_PUBLIC
;
5177 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
5178 addr_type
= ADDR_LE_DEV_RANDOM
;
5180 BT_ERR("Ignoring invalid connection parameters");
5184 min
= le16_to_cpu(param
->min_interval
);
5185 max
= le16_to_cpu(param
->max_interval
);
5186 latency
= le16_to_cpu(param
->latency
);
5187 timeout
= le16_to_cpu(param
->timeout
);
5189 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5190 min
, max
, latency
, timeout
);
5192 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
5193 BT_ERR("Ignoring invalid connection parameters");
5197 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
5200 BT_ERR("Failed to add connection parameters");
5204 hci_param
->conn_min_interval
= min
;
5205 hci_param
->conn_max_interval
= max
;
5206 hci_param
->conn_latency
= latency
;
5207 hci_param
->supervision_timeout
= timeout
;
5210 hci_dev_unlock(hdev
);
5212 return cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0, NULL
, 0);
5215 static const struct mgmt_handler
{
5216 int (*func
) (struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5220 } mgmt_handlers
[] = {
5221 { NULL
}, /* 0x0000 (no command) */
5222 { read_version
, false, MGMT_READ_VERSION_SIZE
},
5223 { read_commands
, false, MGMT_READ_COMMANDS_SIZE
},
5224 { read_index_list
, false, MGMT_READ_INDEX_LIST_SIZE
},
5225 { read_controller_info
, false, MGMT_READ_INFO_SIZE
},
5226 { set_powered
, false, MGMT_SETTING_SIZE
},
5227 { set_discoverable
, false, MGMT_SET_DISCOVERABLE_SIZE
},
5228 { set_connectable
, false, MGMT_SETTING_SIZE
},
5229 { set_fast_connectable
, false, MGMT_SETTING_SIZE
},
5230 { set_pairable
, false, MGMT_SETTING_SIZE
},
5231 { set_link_security
, false, MGMT_SETTING_SIZE
},
5232 { set_ssp
, false, MGMT_SETTING_SIZE
},
5233 { set_hs
, false, MGMT_SETTING_SIZE
},
5234 { set_le
, false, MGMT_SETTING_SIZE
},
5235 { set_dev_class
, false, MGMT_SET_DEV_CLASS_SIZE
},
5236 { set_local_name
, false, MGMT_SET_LOCAL_NAME_SIZE
},
5237 { add_uuid
, false, MGMT_ADD_UUID_SIZE
},
5238 { remove_uuid
, false, MGMT_REMOVE_UUID_SIZE
},
5239 { load_link_keys
, true, MGMT_LOAD_LINK_KEYS_SIZE
},
5240 { load_long_term_keys
, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE
},
5241 { disconnect
, false, MGMT_DISCONNECT_SIZE
},
5242 { get_connections
, false, MGMT_GET_CONNECTIONS_SIZE
},
5243 { pin_code_reply
, false, MGMT_PIN_CODE_REPLY_SIZE
},
5244 { pin_code_neg_reply
, false, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
5245 { set_io_capability
, false, MGMT_SET_IO_CAPABILITY_SIZE
},
5246 { pair_device
, false, MGMT_PAIR_DEVICE_SIZE
},
5247 { cancel_pair_device
, false, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
5248 { unpair_device
, false, MGMT_UNPAIR_DEVICE_SIZE
},
5249 { user_confirm_reply
, false, MGMT_USER_CONFIRM_REPLY_SIZE
},
5250 { user_confirm_neg_reply
, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
5251 { user_passkey_reply
, false, MGMT_USER_PASSKEY_REPLY_SIZE
},
5252 { user_passkey_neg_reply
, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
5253 { read_local_oob_data
, false, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
5254 { add_remote_oob_data
, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE
},
5255 { remove_remote_oob_data
, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
5256 { start_discovery
, false, MGMT_START_DISCOVERY_SIZE
},
5257 { stop_discovery
, false, MGMT_STOP_DISCOVERY_SIZE
},
5258 { confirm_name
, false, MGMT_CONFIRM_NAME_SIZE
},
5259 { block_device
, false, MGMT_BLOCK_DEVICE_SIZE
},
5260 { unblock_device
, false, MGMT_UNBLOCK_DEVICE_SIZE
},
5261 { set_device_id
, false, MGMT_SET_DEVICE_ID_SIZE
},
5262 { set_advertising
, false, MGMT_SETTING_SIZE
},
5263 { set_bredr
, false, MGMT_SETTING_SIZE
},
5264 { set_static_address
, false, MGMT_SET_STATIC_ADDRESS_SIZE
},
5265 { set_scan_params
, false, MGMT_SET_SCAN_PARAMS_SIZE
},
5266 { set_secure_conn
, false, MGMT_SETTING_SIZE
},
5267 { set_debug_keys
, false, MGMT_SETTING_SIZE
},
5268 { set_privacy
, false, MGMT_SET_PRIVACY_SIZE
},
5269 { load_irks
, true, MGMT_LOAD_IRKS_SIZE
},
5270 { get_conn_info
, false, MGMT_GET_CONN_INFO_SIZE
},
5271 { get_clock_info
, false, MGMT_GET_CLOCK_INFO_SIZE
},
5272 { add_device
, false, MGMT_ADD_DEVICE_SIZE
},
5273 { remove_device
, false, MGMT_REMOVE_DEVICE_SIZE
},
5274 { load_conn_param
, true, MGMT_LOAD_CONN_PARAM_SIZE
},
5277 int mgmt_control(struct sock
*sk
, struct msghdr
*msg
, size_t msglen
)
5281 struct mgmt_hdr
*hdr
;
5282 u16 opcode
, index
, len
;
5283 struct hci_dev
*hdev
= NULL
;
5284 const struct mgmt_handler
*handler
;
5287 BT_DBG("got %zu bytes", msglen
);
5289 if (msglen
< sizeof(*hdr
))
5292 buf
= kmalloc(msglen
, GFP_KERNEL
);
5296 if (memcpy_fromiovec(buf
, msg
->msg_iov
, msglen
)) {
5302 opcode
= __le16_to_cpu(hdr
->opcode
);
5303 index
= __le16_to_cpu(hdr
->index
);
5304 len
= __le16_to_cpu(hdr
->len
);
5306 if (len
!= msglen
- sizeof(*hdr
)) {
5311 if (index
!= MGMT_INDEX_NONE
) {
5312 hdev
= hci_dev_get(index
);
5314 err
= cmd_status(sk
, index
, opcode
,
5315 MGMT_STATUS_INVALID_INDEX
);
5319 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
5320 test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) ||
5321 test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
)) {
5322 err
= cmd_status(sk
, index
, opcode
,
5323 MGMT_STATUS_INVALID_INDEX
);
5328 if (opcode
>= ARRAY_SIZE(mgmt_handlers
) ||
5329 mgmt_handlers
[opcode
].func
== NULL
) {
5330 BT_DBG("Unknown op %u", opcode
);
5331 err
= cmd_status(sk
, index
, opcode
,
5332 MGMT_STATUS_UNKNOWN_COMMAND
);
5336 if ((hdev
&& opcode
< MGMT_OP_READ_INFO
) ||
5337 (!hdev
&& opcode
>= MGMT_OP_READ_INFO
)) {
5338 err
= cmd_status(sk
, index
, opcode
,
5339 MGMT_STATUS_INVALID_INDEX
);
5343 handler
= &mgmt_handlers
[opcode
];
5345 if ((handler
->var_len
&& len
< handler
->data_len
) ||
5346 (!handler
->var_len
&& len
!= handler
->data_len
)) {
5347 err
= cmd_status(sk
, index
, opcode
,
5348 MGMT_STATUS_INVALID_PARAMS
);
5353 mgmt_init_hdev(sk
, hdev
);
5355 cp
= buf
+ sizeof(*hdr
);
5357 err
= handler
->func(sk
, hdev
, cp
, len
);
5371 void mgmt_index_added(struct hci_dev
*hdev
)
5373 if (hdev
->dev_type
!= HCI_BREDR
)
5376 mgmt_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
5379 void mgmt_index_removed(struct hci_dev
*hdev
)
5381 u8 status
= MGMT_STATUS_INVALID_INDEX
;
5383 if (hdev
->dev_type
!= HCI_BREDR
)
5386 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status
);
5388 mgmt_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
5391 /* This function requires the caller holds hdev->lock */
5392 static void restart_le_auto_conns(struct hci_dev
*hdev
)
5394 struct hci_conn_params
*p
;
5397 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
5398 if (p
->auto_connect
== HCI_AUTO_CONN_ALWAYS
) {
5399 hci_pend_le_conn_add(hdev
, &p
->addr
, p
->addr_type
);
5404 /* Calling hci_pend_le_conn_add will actually already trigger
5405 * background scanning when needed. So no need to trigger it
5406 * just another time.
5408 * This check is here to avoid an unneeded restart of the
5409 * passive scanning. Since this is during the controller
5410 * power up phase the duplicate filtering is not an issue.
5415 hci_update_background_scan(hdev
);
5418 static void powered_complete(struct hci_dev
*hdev
, u8 status
)
5420 struct cmd_lookup match
= { NULL
, hdev
};
5422 BT_DBG("status 0x%02x", status
);
5426 restart_le_auto_conns(hdev
);
5428 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
5430 new_settings(hdev
, match
.sk
);
5432 hci_dev_unlock(hdev
);
5438 static int powered_update_hci(struct hci_dev
*hdev
)
5440 struct hci_request req
;
5443 hci_req_init(&req
, hdev
);
5445 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
) &&
5446 !lmp_host_ssp_capable(hdev
)) {
5449 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, 1, &ssp
);
5452 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
5453 lmp_bredr_capable(hdev
)) {
5454 struct hci_cp_write_le_host_supported cp
;
5457 cp
.simul
= lmp_le_br_capable(hdev
);
5459 /* Check first if we already have the right
5460 * host state (host features set)
5462 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
5463 cp
.simul
!= lmp_host_le_br_capable(hdev
))
5464 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
5468 if (lmp_le_capable(hdev
)) {
5469 /* Make sure the controller has a good default for
5470 * advertising data. This also applies to the case
5471 * where BR/EDR was toggled during the AUTO_OFF phase.
5473 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
5474 update_adv_data(&req
);
5475 update_scan_rsp_data(&req
);
5478 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
5479 enable_advertising(&req
);
5482 link_sec
= test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
5483 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
5484 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
5485 sizeof(link_sec
), &link_sec
);
5487 if (lmp_bredr_capable(hdev
)) {
5488 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
5489 set_bredr_scan(&req
);
5495 return hci_req_run(&req
, powered_complete
);
5498 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
5500 struct cmd_lookup match
= { NULL
, hdev
};
5501 u8 status_not_powered
= MGMT_STATUS_NOT_POWERED
;
5502 u8 zero_cod
[] = { 0, 0, 0 };
5505 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
5509 if (powered_update_hci(hdev
) == 0)
5512 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
5517 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
5518 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status_not_powered
);
5520 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
5521 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
5522 zero_cod
, sizeof(zero_cod
), NULL
);
5525 err
= new_settings(hdev
, match
.sk
);
5533 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
5535 struct pending_cmd
*cmd
;
5538 cmd
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
5542 if (err
== -ERFKILL
)
5543 status
= MGMT_STATUS_RFKILLED
;
5545 status
= MGMT_STATUS_FAILED
;
5547 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
5549 mgmt_pending_remove(cmd
);
5552 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
5554 struct hci_request req
;
5558 /* When discoverable timeout triggers, then just make sure
5559 * the limited discoverable flag is cleared. Even in the case
5560 * of a timeout triggered from general discoverable, it is
5561 * safe to unconditionally clear the flag.
5563 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
5564 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5566 hci_req_init(&req
, hdev
);
5567 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
5568 u8 scan
= SCAN_PAGE
;
5569 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
5570 sizeof(scan
), &scan
);
5573 update_adv_data(&req
);
5574 hci_req_run(&req
, NULL
);
5576 hdev
->discov_timeout
= 0;
5578 new_settings(hdev
, NULL
);
5580 hci_dev_unlock(hdev
);
5583 void mgmt_discoverable(struct hci_dev
*hdev
, u8 discoverable
)
5587 /* Nothing needed here if there's a pending command since that
5588 * commands request completion callback takes care of everything
5591 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
))
5594 /* Powering off may clear the scan mode - don't let that interfere */
5595 if (!discoverable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5599 changed
= !test_and_set_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5601 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
5602 changed
= test_and_clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5606 struct hci_request req
;
5608 /* In case this change in discoverable was triggered by
5609 * a disabling of connectable there could be a need to
5610 * update the advertising flags.
5612 hci_req_init(&req
, hdev
);
5613 update_adv_data(&req
);
5614 hci_req_run(&req
, NULL
);
5616 new_settings(hdev
, NULL
);
5620 void mgmt_connectable(struct hci_dev
*hdev
, u8 connectable
)
5624 /* Nothing needed here if there's a pending command since that
5625 * commands request completion callback takes care of everything
5628 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
))
5631 /* Powering off may clear the scan mode - don't let that interfere */
5632 if (!connectable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5636 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
5638 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
5641 new_settings(hdev
, NULL
);
5644 void mgmt_advertising(struct hci_dev
*hdev
, u8 advertising
)
5646 /* Powering off may stop advertising - don't let that interfere */
5647 if (!advertising
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5651 set_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5653 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5656 void mgmt_write_scan_failed(struct hci_dev
*hdev
, u8 scan
, u8 status
)
5658 u8 mgmt_err
= mgmt_status(status
);
5660 if (scan
& SCAN_PAGE
)
5661 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE
, hdev
,
5662 cmd_status_rsp
, &mgmt_err
);
5664 if (scan
& SCAN_INQUIRY
)
5665 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE
, hdev
,
5666 cmd_status_rsp
, &mgmt_err
);
5669 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
5672 struct mgmt_ev_new_link_key ev
;
5674 memset(&ev
, 0, sizeof(ev
));
5676 ev
.store_hint
= persistent
;
5677 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
5678 ev
.key
.addr
.type
= BDADDR_BREDR
;
5679 ev
.key
.type
= key
->type
;
5680 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
5681 ev
.key
.pin_len
= key
->pin_len
;
5683 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
5686 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
5688 if (ltk
->authenticated
)
5689 return MGMT_LTK_AUTHENTICATED
;
5691 return MGMT_LTK_UNAUTHENTICATED
;
/* Send a New Long Term Key event to userspace. The store hint is forced
 * to zero for keys whose owner used an unresolvable/resolvable random
 * address without an identity, since such keys are not worth storing.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		/* Top two bits != 11 means not a static random address */
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
/* Send a New Identity Resolving Key event to userspace. Storage is only
 * suggested when the device actually uses a resolvable private address
 * (non-zero rpa).
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
/* Send a New Signature Resolving Key event to userspace. Same identity
 * address policy as for long term keys: keys tied to a private random
 * address get a zero store hint.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		/* Top two bits != 11 means not a static random address */
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
/* Send a New Connection Parameter event to userspace with the given LE
 * connection parameters. All multi-byte fields are converted to little
 * endian as required by the mgmt wire format.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u16 min_interval, u16 max_interval,
			 u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = 0x00;	/* parameters are not suggested for storage */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
/* Append one EIR (Extended Inquiry Response) field to the buffer at
 * eir + eir_len: a length byte (type byte + payload), the type byte,
 * then data_len bytes of payload. Returns the new total length. The
 * caller must ensure the buffer has room.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	/* sizeof(type) == 1: the length byte counts the type byte too */
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
/* Send a Device Connected event to userspace, embedding the remote name
 * and (when meaningful) class of device as EIR fields after the fixed
 * part of the event.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Skip an all-zero class of device */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command
 * with success and hand its socket back to the caller (via *data) with
 * an extra reference held.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* Keep the socket alive for the caller; it must sock_put() it */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
/* mgmt_pending_foreach() callback: emit a Device Unpaired event and
 * complete the pending UNPAIR_DEVICE (or related) command with success.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
/* Handle a remote device disconnecting: kick a pending power-off if this
 * was the last connection, complete pending DISCONNECT commands, and
 * (for ACL/LE links that userspace knew about) send a Device
 * Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back the last completed command's socket
	 * (with a reference) so the event below can skip that socket.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
/* A disconnect attempt failed: resolve pending UNPAIR_DEVICE commands
 * and, if a pending DISCONNECT command matches this address, complete it
 * with the translated error status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond if the pending command targets this address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
/* A connection attempt failed: kick a pending power-off if this was the
 * last connection, then send a Connect Failed event with the translated
 * status.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
/* Forward a BR/EDR PIN code request from the controller to userspace as
 * a PIN Code Request event.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
/* Complete a pending PIN_CODE_REPLY mgmt command once the controller has
 * acknowledged the HCI PIN code reply.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
/* Complete a pending PIN_CODE_NEG_REPLY mgmt command once the controller
 * has acknowledged the HCI negative PIN code reply.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
/* Ask userspace to confirm a pairing numeric value (User Confirmation
 * Request). Returns the result of sending the mgmt event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
/* Ask userspace to supply a passkey for pairing (User Passkey Request).
 * Returns the result of sending the mgmt event.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
/* Shared completion path for the four user confirm/passkey (neg) reply
 * opcodes: find the pending command for @opcode, complete it with the
 * translated status, and remove it. Returns -ENOENT when no command is
 * pending, otherwise the cmd_complete() result.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
/* Completion hook for a positive user confirmation reply. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
/* Completion hook for a negative user confirmation reply. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
/* Completion hook for a positive user passkey reply. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
/* Completion hook for a negative user passkey reply. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
/* Notify userspace of a passkey to be displayed during pairing, along
 * with whether a digit has been entered on the remote side. Returns the
 * result of sending the mgmt event.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
/* Send an Authentication Failed event to userspace with the translated
 * HCI status.
 */
void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
/* Write Authentication Enable completed: on error fail all pending
 * SET_LINK_SECURITY commands; on success sync the HCI_LINK_SECURITY
 * mgmt flag with the controller's HCI_AUTH state, complete pending
 * commands and emit New Settings if anything changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
/* Queue an HCI Write EIR command that clears the controller's extended
 * inquiry response data (and the cached copy in hdev->eir). No-op for
 * controllers without EIR support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
/* Write Simple Pairing Mode completed: reconcile the HCI_SSP_ENABLED
 * (and dependent HCI_HS_ENABLED) mgmt flags with the result, answer
 * pending SET_SSP commands, and follow up with the HCI requests implied
 * by the new SSP state (debug-key mode / EIR update or clear).
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an optimistic enable and tell userspace */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* High speed depends on SSP; clear it alongside */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
/* Write Secure Connections support completed: reconcile HCI_SC_ENABLED
 * and HCI_SC_ONLY mgmt flags with the result, answer pending
 * SET_SECURE_CONN commands, and emit New Settings if anything changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an optimistic enable and tell userspace */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
/* mgmt_pending_foreach() callback: remember the first pending command's
 * socket in the cmd_lookup match, taking a reference. The caller must
 * sock_put() it.
 */
static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
/* Write Class of Device completed: answer the pending commands that may
 * have triggered it and, on success, broadcast the new class to
 * userspace.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
/* Local name change completed: broadcast a Local Name Changed event
 * (skipping the socket whose SET_LOCAL_NAME command caused it, if any).
 * When the change was not userspace-initiated, also cache the name and
 * suppress the event during power-on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
/* Read Local OOB Data completed: answer the pending command with either
 * an error status, the extended (SC, 192+256 bit) response when Secure
 * Connections data is available, or the legacy 192-bit response.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
/* Report a discovered device to userspace as a Device Found event,
 * resolving RPAs to identity addresses when an IRK is known and packing
 * EIR data, class of device and scan response data into the event.
 * Only emitted while discovery is active.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Prefer the identity address when the RPA can be resolved */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append CoD as an EIR field only if the EIR data lacks one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
/* Report a resolved remote name to userspace as a Device Found event
 * carrying only a Complete Local Name EIR field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* +2 for the EIR length and type bytes of the name field */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
/* The discovery state changed: complete the pending START_DISCOVERY or
 * STOP_DISCOVERY command (replying with the discovery type) and emit a
 * Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
/* Completion callback for the re-enable-advertising HCI request. */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}
/* Re-enable LE advertising after it was implicitly stopped (e.g. by a
 * connection), provided no LE connection remains and the advertising
 * mgmt setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}