/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 14
43 static const u16 mgmt_commands
[] = {
44 MGMT_OP_READ_INDEX_LIST
,
47 MGMT_OP_SET_DISCOVERABLE
,
48 MGMT_OP_SET_CONNECTABLE
,
49 MGMT_OP_SET_FAST_CONNECTABLE
,
51 MGMT_OP_SET_LINK_SECURITY
,
55 MGMT_OP_SET_DEV_CLASS
,
56 MGMT_OP_SET_LOCAL_NAME
,
59 MGMT_OP_LOAD_LINK_KEYS
,
60 MGMT_OP_LOAD_LONG_TERM_KEYS
,
62 MGMT_OP_GET_CONNECTIONS
,
63 MGMT_OP_PIN_CODE_REPLY
,
64 MGMT_OP_PIN_CODE_NEG_REPLY
,
65 MGMT_OP_SET_IO_CAPABILITY
,
67 MGMT_OP_CANCEL_PAIR_DEVICE
,
68 MGMT_OP_UNPAIR_DEVICE
,
69 MGMT_OP_USER_CONFIRM_REPLY
,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
71 MGMT_OP_USER_PASSKEY_REPLY
,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
73 MGMT_OP_READ_LOCAL_OOB_DATA
,
74 MGMT_OP_ADD_REMOTE_OOB_DATA
,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
76 MGMT_OP_START_DISCOVERY
,
77 MGMT_OP_STOP_DISCOVERY
,
80 MGMT_OP_UNBLOCK_DEVICE
,
81 MGMT_OP_SET_DEVICE_ID
,
82 MGMT_OP_SET_ADVERTISING
,
84 MGMT_OP_SET_STATIC_ADDRESS
,
85 MGMT_OP_SET_SCAN_PARAMS
,
86 MGMT_OP_SET_SECURE_CONN
,
87 MGMT_OP_SET_DEBUG_KEYS
,
90 MGMT_OP_GET_CONN_INFO
,
91 MGMT_OP_GET_CLOCK_INFO
,
93 MGMT_OP_REMOVE_DEVICE
,
94 MGMT_OP_LOAD_CONN_PARAM
,
95 MGMT_OP_READ_UNCONF_INDEX_LIST
,
96 MGMT_OP_READ_CONFIG_INFO
,
97 MGMT_OP_SET_EXTERNAL_CONFIG
,
98 MGMT_OP_SET_PUBLIC_ADDRESS
,
99 MGMT_OP_START_SERVICE_DISCOVERY
,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
101 MGMT_OP_READ_EXT_INDEX_LIST
,
102 MGMT_OP_READ_ADV_FEATURES
,
103 MGMT_OP_ADD_ADVERTISING
,
104 MGMT_OP_REMOVE_ADVERTISING
,
105 MGMT_OP_GET_ADV_SIZE_INFO
,
106 MGMT_OP_START_LIMITED_DISCOVERY
,
107 MGMT_OP_READ_EXT_INFO
,
108 MGMT_OP_SET_APPEARANCE
,
111 static const u16 mgmt_events
[] = {
112 MGMT_EV_CONTROLLER_ERROR
,
114 MGMT_EV_INDEX_REMOVED
,
115 MGMT_EV_NEW_SETTINGS
,
116 MGMT_EV_CLASS_OF_DEV_CHANGED
,
117 MGMT_EV_LOCAL_NAME_CHANGED
,
118 MGMT_EV_NEW_LINK_KEY
,
119 MGMT_EV_NEW_LONG_TERM_KEY
,
120 MGMT_EV_DEVICE_CONNECTED
,
121 MGMT_EV_DEVICE_DISCONNECTED
,
122 MGMT_EV_CONNECT_FAILED
,
123 MGMT_EV_PIN_CODE_REQUEST
,
124 MGMT_EV_USER_CONFIRM_REQUEST
,
125 MGMT_EV_USER_PASSKEY_REQUEST
,
127 MGMT_EV_DEVICE_FOUND
,
129 MGMT_EV_DEVICE_BLOCKED
,
130 MGMT_EV_DEVICE_UNBLOCKED
,
131 MGMT_EV_DEVICE_UNPAIRED
,
132 MGMT_EV_PASSKEY_NOTIFY
,
135 MGMT_EV_DEVICE_ADDED
,
136 MGMT_EV_DEVICE_REMOVED
,
137 MGMT_EV_NEW_CONN_PARAM
,
138 MGMT_EV_UNCONF_INDEX_ADDED
,
139 MGMT_EV_UNCONF_INDEX_REMOVED
,
140 MGMT_EV_NEW_CONFIG_OPTIONS
,
141 MGMT_EV_EXT_INDEX_ADDED
,
142 MGMT_EV_EXT_INDEX_REMOVED
,
143 MGMT_EV_LOCAL_OOB_DATA_UPDATED
,
144 MGMT_EV_ADVERTISING_ADDED
,
145 MGMT_EV_ADVERTISING_REMOVED
,
146 MGMT_EV_EXT_INFO_CHANGED
,
149 static const u16 mgmt_untrusted_commands
[] = {
150 MGMT_OP_READ_INDEX_LIST
,
152 MGMT_OP_READ_UNCONF_INDEX_LIST
,
153 MGMT_OP_READ_CONFIG_INFO
,
154 MGMT_OP_READ_EXT_INDEX_LIST
,
155 MGMT_OP_READ_EXT_INFO
,
158 static const u16 mgmt_untrusted_events
[] = {
160 MGMT_EV_INDEX_REMOVED
,
161 MGMT_EV_NEW_SETTINGS
,
162 MGMT_EV_CLASS_OF_DEV_CHANGED
,
163 MGMT_EV_LOCAL_NAME_CHANGED
,
164 MGMT_EV_UNCONF_INDEX_ADDED
,
165 MGMT_EV_UNCONF_INDEX_REMOVED
,
166 MGMT_EV_NEW_CONFIG_OPTIONS
,
167 MGMT_EV_EXT_INDEX_ADDED
,
168 MGMT_EV_EXT_INDEX_REMOVED
,
169 MGMT_EV_EXT_INFO_CHANGED
,
172 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
174 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
175 "\x00\x00\x00\x00\x00\x00\x00\x00"
177 /* HCI to MGMT error code conversion table */
178 static u8 mgmt_status_table
[] = {
180 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
181 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
182 MGMT_STATUS_FAILED
, /* Hardware Failure */
183 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
184 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
185 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
186 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
187 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
188 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
189 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
190 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
191 MGMT_STATUS_BUSY
, /* Command Disallowed */
192 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
193 MGMT_STATUS_REJECTED
, /* Rejected Security */
194 MGMT_STATUS_REJECTED
, /* Rejected Personal */
195 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
196 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
197 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
198 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
199 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
200 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
201 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
202 MGMT_STATUS_BUSY
, /* Repeated Attempts */
203 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
204 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
205 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
206 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
207 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
208 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
209 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
210 MGMT_STATUS_FAILED
, /* Unspecified Error */
211 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
212 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
213 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
214 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
215 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
216 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
217 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
218 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
219 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
220 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
221 MGMT_STATUS_FAILED
, /* Transaction Collision */
222 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
223 MGMT_STATUS_REJECTED
, /* QoS Rejected */
224 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
225 MGMT_STATUS_REJECTED
, /* Insufficient Security */
226 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
227 MGMT_STATUS_BUSY
, /* Role Switch Pending */
228 MGMT_STATUS_FAILED
, /* Slot Violation */
229 MGMT_STATUS_FAILED
, /* Role Switch Failed */
230 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
231 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
232 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
233 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
234 MGMT_STATUS_BUSY
, /* Controller Busy */
235 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
236 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
237 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
238 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
239 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
242 static u8
mgmt_status(u8 hci_status
)
244 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
245 return mgmt_status_table
[hci_status
];
247 return MGMT_STATUS_FAILED
;
250 static int mgmt_index_event(u16 event
, struct hci_dev
*hdev
, void *data
,
253 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
257 static int mgmt_limited_event(u16 event
, struct hci_dev
*hdev
, void *data
,
258 u16 len
, int flag
, struct sock
*skip_sk
)
260 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
264 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 len
,
265 struct sock
*skip_sk
)
267 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
268 HCI_SOCK_TRUSTED
, skip_sk
);
271 static u8
le_addr_type(u8 mgmt_addr_type
)
273 if (mgmt_addr_type
== BDADDR_LE_PUBLIC
)
274 return ADDR_LE_DEV_PUBLIC
;
276 return ADDR_LE_DEV_RANDOM
;
279 void mgmt_fill_version_info(void *ver
)
281 struct mgmt_rp_read_version
*rp
= ver
;
283 rp
->version
= MGMT_VERSION
;
284 rp
->revision
= cpu_to_le16(MGMT_REVISION
);
287 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
290 struct mgmt_rp_read_version rp
;
292 BT_DBG("sock %p", sk
);
294 mgmt_fill_version_info(&rp
);
296 return mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0,
300 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
303 struct mgmt_rp_read_commands
*rp
;
304 u16 num_commands
, num_events
;
308 BT_DBG("sock %p", sk
);
310 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
311 num_commands
= ARRAY_SIZE(mgmt_commands
);
312 num_events
= ARRAY_SIZE(mgmt_events
);
314 num_commands
= ARRAY_SIZE(mgmt_untrusted_commands
);
315 num_events
= ARRAY_SIZE(mgmt_untrusted_events
);
318 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
320 rp
= kmalloc(rp_size
, GFP_KERNEL
);
324 rp
->num_commands
= cpu_to_le16(num_commands
);
325 rp
->num_events
= cpu_to_le16(num_events
);
327 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
328 __le16
*opcode
= rp
->opcodes
;
330 for (i
= 0; i
< num_commands
; i
++, opcode
++)
331 put_unaligned_le16(mgmt_commands
[i
], opcode
);
333 for (i
= 0; i
< num_events
; i
++, opcode
++)
334 put_unaligned_le16(mgmt_events
[i
], opcode
);
336 __le16
*opcode
= rp
->opcodes
;
338 for (i
= 0; i
< num_commands
; i
++, opcode
++)
339 put_unaligned_le16(mgmt_untrusted_commands
[i
], opcode
);
341 for (i
= 0; i
< num_events
; i
++, opcode
++)
342 put_unaligned_le16(mgmt_untrusted_events
[i
], opcode
);
345 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0,
352 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
355 struct mgmt_rp_read_index_list
*rp
;
361 BT_DBG("sock %p", sk
);
363 read_lock(&hci_dev_list_lock
);
366 list_for_each_entry(d
, &hci_dev_list
, list
) {
367 if (d
->dev_type
== HCI_PRIMARY
&&
368 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
372 rp_len
= sizeof(*rp
) + (2 * count
);
373 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
375 read_unlock(&hci_dev_list_lock
);
380 list_for_each_entry(d
, &hci_dev_list
, list
) {
381 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
382 hci_dev_test_flag(d
, HCI_CONFIG
) ||
383 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
386 /* Devices marked as raw-only are neither configured
387 * nor unconfigured controllers.
389 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
392 if (d
->dev_type
== HCI_PRIMARY
&&
393 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
394 rp
->index
[count
++] = cpu_to_le16(d
->id
);
395 BT_DBG("Added hci%u", d
->id
);
399 rp
->num_controllers
= cpu_to_le16(count
);
400 rp_len
= sizeof(*rp
) + (2 * count
);
402 read_unlock(&hci_dev_list_lock
);
404 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
,
412 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
413 void *data
, u16 data_len
)
415 struct mgmt_rp_read_unconf_index_list
*rp
;
421 BT_DBG("sock %p", sk
);
423 read_lock(&hci_dev_list_lock
);
426 list_for_each_entry(d
, &hci_dev_list
, list
) {
427 if (d
->dev_type
== HCI_PRIMARY
&&
428 hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
432 rp_len
= sizeof(*rp
) + (2 * count
);
433 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
435 read_unlock(&hci_dev_list_lock
);
440 list_for_each_entry(d
, &hci_dev_list
, list
) {
441 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
442 hci_dev_test_flag(d
, HCI_CONFIG
) ||
443 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
446 /* Devices marked as raw-only are neither configured
447 * nor unconfigured controllers.
449 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
452 if (d
->dev_type
== HCI_PRIMARY
&&
453 hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
454 rp
->index
[count
++] = cpu_to_le16(d
->id
);
455 BT_DBG("Added hci%u", d
->id
);
459 rp
->num_controllers
= cpu_to_le16(count
);
460 rp_len
= sizeof(*rp
) + (2 * count
);
462 read_unlock(&hci_dev_list_lock
);
464 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
465 MGMT_OP_READ_UNCONF_INDEX_LIST
, 0, rp
, rp_len
);
472 static int read_ext_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
473 void *data
, u16 data_len
)
475 struct mgmt_rp_read_ext_index_list
*rp
;
481 BT_DBG("sock %p", sk
);
483 read_lock(&hci_dev_list_lock
);
486 list_for_each_entry(d
, &hci_dev_list
, list
) {
487 if (d
->dev_type
== HCI_PRIMARY
|| d
->dev_type
== HCI_AMP
)
491 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
492 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
494 read_unlock(&hci_dev_list_lock
);
499 list_for_each_entry(d
, &hci_dev_list
, list
) {
500 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
501 hci_dev_test_flag(d
, HCI_CONFIG
) ||
502 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
505 /* Devices marked as raw-only are neither configured
506 * nor unconfigured controllers.
508 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
511 if (d
->dev_type
== HCI_PRIMARY
) {
512 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
513 rp
->entry
[count
].type
= 0x01;
515 rp
->entry
[count
].type
= 0x00;
516 } else if (d
->dev_type
== HCI_AMP
) {
517 rp
->entry
[count
].type
= 0x02;
522 rp
->entry
[count
].bus
= d
->bus
;
523 rp
->entry
[count
++].index
= cpu_to_le16(d
->id
);
524 BT_DBG("Added hci%u", d
->id
);
527 rp
->num_controllers
= cpu_to_le16(count
);
528 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
530 read_unlock(&hci_dev_list_lock
);
532 /* If this command is called at least once, then all the
533 * default index and unconfigured index events are disabled
534 * and from now on only extended index events are used.
536 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INDEX_EVENTS
);
537 hci_sock_clear_flag(sk
, HCI_MGMT_INDEX_EVENTS
);
538 hci_sock_clear_flag(sk
, HCI_MGMT_UNCONF_INDEX_EVENTS
);
540 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
541 MGMT_OP_READ_EXT_INDEX_LIST
, 0, rp
, rp_len
);
548 static bool is_configured(struct hci_dev
*hdev
)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
551 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
555 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
561 static __le32
get_missing_options(struct hci_dev
*hdev
)
565 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
566 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
567 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
569 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
570 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
571 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
573 return cpu_to_le32(options
);
576 static int new_options(struct hci_dev
*hdev
, struct sock
*skip
)
578 __le32 options
= get_missing_options(hdev
);
580 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS
, hdev
, &options
,
581 sizeof(options
), HCI_MGMT_OPTION_EVENTS
, skip
);
584 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
586 __le32 options
= get_missing_options(hdev
);
588 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
592 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
593 void *data
, u16 data_len
)
595 struct mgmt_rp_read_config_info rp
;
598 BT_DBG("sock %p %s", sk
, hdev
->name
);
602 memset(&rp
, 0, sizeof(rp
));
603 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
605 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
606 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
608 if (hdev
->set_bdaddr
)
609 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
611 rp
.supported_options
= cpu_to_le32(options
);
612 rp
.missing_options
= get_missing_options(hdev
);
614 hci_dev_unlock(hdev
);
616 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0,
620 static u32
get_supported_settings(struct hci_dev
*hdev
)
624 settings
|= MGMT_SETTING_POWERED
;
625 settings
|= MGMT_SETTING_BONDABLE
;
626 settings
|= MGMT_SETTING_DEBUG_KEYS
;
627 settings
|= MGMT_SETTING_CONNECTABLE
;
628 settings
|= MGMT_SETTING_DISCOVERABLE
;
630 if (lmp_bredr_capable(hdev
)) {
631 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
632 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
633 settings
|= MGMT_SETTING_BREDR
;
634 settings
|= MGMT_SETTING_LINK_SECURITY
;
636 if (lmp_ssp_capable(hdev
)) {
637 settings
|= MGMT_SETTING_SSP
;
638 settings
|= MGMT_SETTING_HS
;
641 if (lmp_sc_capable(hdev
))
642 settings
|= MGMT_SETTING_SECURE_CONN
;
645 if (lmp_le_capable(hdev
)) {
646 settings
|= MGMT_SETTING_LE
;
647 settings
|= MGMT_SETTING_ADVERTISING
;
648 settings
|= MGMT_SETTING_SECURE_CONN
;
649 settings
|= MGMT_SETTING_PRIVACY
;
650 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
653 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
655 settings
|= MGMT_SETTING_CONFIGURATION
;
660 static u32
get_current_settings(struct hci_dev
*hdev
)
664 if (hdev_is_powered(hdev
))
665 settings
|= MGMT_SETTING_POWERED
;
667 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
668 settings
|= MGMT_SETTING_CONNECTABLE
;
670 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
671 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
673 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
674 settings
|= MGMT_SETTING_DISCOVERABLE
;
676 if (hci_dev_test_flag(hdev
, HCI_BONDABLE
))
677 settings
|= MGMT_SETTING_BONDABLE
;
679 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
680 settings
|= MGMT_SETTING_BREDR
;
682 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
683 settings
|= MGMT_SETTING_LE
;
685 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
))
686 settings
|= MGMT_SETTING_LINK_SECURITY
;
688 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
689 settings
|= MGMT_SETTING_SSP
;
691 if (hci_dev_test_flag(hdev
, HCI_HS_ENABLED
))
692 settings
|= MGMT_SETTING_HS
;
694 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
695 settings
|= MGMT_SETTING_ADVERTISING
;
697 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))
698 settings
|= MGMT_SETTING_SECURE_CONN
;
700 if (hci_dev_test_flag(hdev
, HCI_KEEP_DEBUG_KEYS
))
701 settings
|= MGMT_SETTING_DEBUG_KEYS
;
703 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
))
704 settings
|= MGMT_SETTING_PRIVACY
;
706 /* The current setting for static address has two purposes. The
707 * first is to indicate if the static address will be used and
708 * the second is to indicate if it is actually set.
710 * This means if the static address is not configured, this flag
711 * will never be set. If the address is configured, then if the
712 * address is actually used decides if the flag is set or not.
714 * For single mode LE only controllers and dual-mode controllers
715 * with BR/EDR disabled, the existence of the static address will
718 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
719 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
720 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
721 if (bacmp(&hdev
->static_addr
, BDADDR_ANY
))
722 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
728 static struct mgmt_pending_cmd
*pending_find(u16 opcode
, struct hci_dev
*hdev
)
730 return mgmt_pending_find(HCI_CHANNEL_CONTROL
, opcode
, hdev
);
733 static struct mgmt_pending_cmd
*pending_find_data(u16 opcode
,
734 struct hci_dev
*hdev
,
737 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL
, opcode
, hdev
, data
);
740 u8
mgmt_get_adv_discov_flags(struct hci_dev
*hdev
)
742 struct mgmt_pending_cmd
*cmd
;
744 /* If there's a pending mgmt command the flags will not yet have
745 * their final values, so check for this first.
747 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
749 struct mgmt_mode
*cp
= cmd
->param
;
751 return LE_AD_GENERAL
;
752 else if (cp
->val
== 0x02)
753 return LE_AD_LIMITED
;
755 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
756 return LE_AD_LIMITED
;
757 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
758 return LE_AD_GENERAL
;
764 bool mgmt_get_connectable(struct hci_dev
*hdev
)
766 struct mgmt_pending_cmd
*cmd
;
768 /* If there's a pending mgmt command the flag will not yet have
769 * it's final value, so check for this first.
771 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
773 struct mgmt_mode
*cp
= cmd
->param
;
778 return hci_dev_test_flag(hdev
, HCI_CONNECTABLE
);
781 static void service_cache_off(struct work_struct
*work
)
783 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
785 struct hci_request req
;
787 if (!hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
790 hci_req_init(&req
, hdev
);
794 __hci_req_update_eir(&req
);
795 __hci_req_update_class(&req
);
797 hci_dev_unlock(hdev
);
799 hci_req_run(&req
, NULL
);
802 static void rpa_expired(struct work_struct
*work
)
804 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
806 struct hci_request req
;
810 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
812 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
815 /* The generation of a new RPA and programming it into the
816 * controller happens in the hci_req_enable_advertising()
819 hci_req_init(&req
, hdev
);
820 __hci_req_enable_advertising(&req
);
821 hci_req_run(&req
, NULL
);
824 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
826 if (hci_dev_test_and_set_flag(hdev
, HCI_MGMT
))
829 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
830 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
832 /* Non-mgmt controlled devices get this bit set
833 * implicitly so that pairing works for them, however
834 * for mgmt we require user-space to explicitly enable
837 hci_dev_clear_flag(hdev
, HCI_BONDABLE
);
840 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
841 void *data
, u16 data_len
)
843 struct mgmt_rp_read_info rp
;
845 BT_DBG("sock %p %s", sk
, hdev
->name
);
849 memset(&rp
, 0, sizeof(rp
));
851 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
853 rp
.version
= hdev
->hci_ver
;
854 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
856 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
857 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
859 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
861 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
862 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
864 hci_dev_unlock(hdev
);
866 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
870 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
873 eir
[eir_len
++] = sizeof(type
) + data_len
;
874 eir
[eir_len
++] = type
;
875 memcpy(&eir
[eir_len
], data
, data_len
);
881 static inline u16
eir_append_le16(u8
*eir
, u16 eir_len
, u8 type
, u16 data
)
883 eir
[eir_len
++] = sizeof(type
) + sizeof(data
);
884 eir
[eir_len
++] = type
;
885 put_unaligned_le16(data
, &eir
[eir_len
]);
886 eir_len
+= sizeof(data
);
891 static u16
append_eir_data_to_buf(struct hci_dev
*hdev
, u8
*eir
)
896 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
897 eir_len
= eir_append_data(eir
, eir_len
, EIR_CLASS_OF_DEV
,
900 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
901 eir_len
= eir_append_le16(eir
, eir_len
, EIR_APPEARANCE
,
904 name_len
= strlen(hdev
->dev_name
);
905 eir_len
= eir_append_data(eir
, eir_len
, EIR_NAME_COMPLETE
,
906 hdev
->dev_name
, name_len
);
908 name_len
= strlen(hdev
->short_name
);
909 eir_len
= eir_append_data(eir
, eir_len
, EIR_NAME_SHORT
,
910 hdev
->short_name
, name_len
);
915 static int read_ext_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
916 void *data
, u16 data_len
)
919 struct mgmt_rp_read_ext_info
*rp
= (void *)buf
;
922 BT_DBG("sock %p %s", sk
, hdev
->name
);
924 memset(&buf
, 0, sizeof(buf
));
928 bacpy(&rp
->bdaddr
, &hdev
->bdaddr
);
930 rp
->version
= hdev
->hci_ver
;
931 rp
->manufacturer
= cpu_to_le16(hdev
->manufacturer
);
933 rp
->supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
934 rp
->current_settings
= cpu_to_le32(get_current_settings(hdev
));
937 eir_len
= append_eir_data_to_buf(hdev
, rp
->eir
);
938 rp
->eir_len
= cpu_to_le16(eir_len
);
940 hci_dev_unlock(hdev
);
942 /* If this command is called at least once, then the events
943 * for class of device and local name changes are disabled
944 * and only the new extended controller information event
947 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INFO_EVENTS
);
948 hci_sock_clear_flag(sk
, HCI_MGMT_DEV_CLASS_EVENTS
);
949 hci_sock_clear_flag(sk
, HCI_MGMT_LOCAL_NAME_EVENTS
);
951 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_EXT_INFO
, 0, rp
,
952 sizeof(*rp
) + eir_len
);
955 static int ext_info_changed(struct hci_dev
*hdev
, struct sock
*skip
)
958 struct mgmt_ev_ext_info_changed
*ev
= (void *)buf
;
961 memset(buf
, 0, sizeof(buf
));
963 eir_len
= append_eir_data_to_buf(hdev
, ev
->eir
);
964 ev
->eir_len
= cpu_to_le16(eir_len
);
966 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED
, hdev
, ev
,
967 sizeof(*ev
) + eir_len
,
968 HCI_MGMT_EXT_INFO_EVENTS
, skip
);
971 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
973 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
975 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
979 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
981 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
983 if (hci_conn_count(hdev
) == 0) {
984 cancel_delayed_work(&hdev
->power_off
);
985 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
989 void mgmt_advertising_added(struct sock
*sk
, struct hci_dev
*hdev
, u8 instance
)
991 struct mgmt_ev_advertising_added ev
;
993 ev
.instance
= instance
;
995 mgmt_event(MGMT_EV_ADVERTISING_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
998 void mgmt_advertising_removed(struct sock
*sk
, struct hci_dev
*hdev
,
1001 struct mgmt_ev_advertising_removed ev
;
1003 ev
.instance
= instance
;
1005 mgmt_event(MGMT_EV_ADVERTISING_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
1008 static void cancel_adv_timeout(struct hci_dev
*hdev
)
1010 if (hdev
->adv_instance_timeout
) {
1011 hdev
->adv_instance_timeout
= 0;
1012 cancel_delayed_work(&hdev
->adv_instance_expire
);
1016 static int clean_up_hci_state(struct hci_dev
*hdev
)
1018 struct hci_request req
;
1019 struct hci_conn
*conn
;
1020 bool discov_stopped
;
1023 hci_req_init(&req
, hdev
);
1025 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1026 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1028 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1031 hci_req_clear_adv_instance(hdev
, NULL
, NULL
, 0x00, false);
1033 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1034 __hci_req_disable_advertising(&req
);
1036 discov_stopped
= hci_req_stop_discovery(&req
);
1038 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1039 /* 0x15 == Terminated due to Power Off */
1040 __hci_abort_conn(&req
, conn
, 0x15);
1043 err
= hci_req_run(&req
, clean_up_hci_complete
);
1044 if (!err
&& discov_stopped
)
1045 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
1050 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1053 struct mgmt_mode
*cp
= data
;
1054 struct mgmt_pending_cmd
*cmd
;
1057 BT_DBG("request for %s", hdev
->name
);
1059 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1060 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1061 MGMT_STATUS_INVALID_PARAMS
);
1065 if (pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1066 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1071 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1072 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1076 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1083 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1086 /* Disconnect connections, stop scans, etc */
1087 err
= clean_up_hci_state(hdev
);
1089 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1090 HCI_POWER_OFF_TIMEOUT
);
1092 /* ENODATA means there were no HCI commands queued */
1093 if (err
== -ENODATA
) {
1094 cancel_delayed_work(&hdev
->power_off
);
1095 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1101 hci_dev_unlock(hdev
);
1105 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1107 __le32 ev
= cpu_to_le32(get_current_settings(hdev
));
1109 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
,
1110 sizeof(ev
), HCI_MGMT_SETTING_EVENTS
, skip
);
1113 int mgmt_new_settings(struct hci_dev
*hdev
)
1115 return new_settings(hdev
, NULL
);
1120 struct hci_dev
*hdev
;
1124 static void settings_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1126 struct cmd_lookup
*match
= data
;
1128 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1130 list_del(&cmd
->list
);
1132 if (match
->sk
== NULL
) {
1133 match
->sk
= cmd
->sk
;
1134 sock_hold(match
->sk
);
1137 mgmt_pending_free(cmd
);
1140 static void cmd_status_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1144 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1145 mgmt_pending_remove(cmd
);
1148 static void cmd_complete_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1150 if (cmd
->cmd_complete
) {
1153 cmd
->cmd_complete(cmd
, *status
);
1154 mgmt_pending_remove(cmd
);
1159 cmd_status_rsp(cmd
, data
);
1162 static int generic_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1164 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1165 cmd
->param
, cmd
->param_len
);
1168 static int addr_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1170 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1171 cmd
->param
, sizeof(struct mgmt_addr_info
));
1174 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1176 if (!lmp_bredr_capable(hdev
))
1177 return MGMT_STATUS_NOT_SUPPORTED
;
1178 else if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1179 return MGMT_STATUS_REJECTED
;
1181 return MGMT_STATUS_SUCCESS
;
1184 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1186 if (!lmp_le_capable(hdev
))
1187 return MGMT_STATUS_NOT_SUPPORTED
;
1188 else if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1189 return MGMT_STATUS_REJECTED
;
1191 return MGMT_STATUS_SUCCESS
;
1194 void mgmt_set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1196 struct mgmt_pending_cmd
*cmd
;
1198 BT_DBG("status 0x%02x", status
);
1202 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1207 u8 mgmt_err
= mgmt_status(status
);
1208 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1209 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1213 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1214 hdev
->discov_timeout
> 0) {
1215 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1216 queue_delayed_work(hdev
->req_workqueue
, &hdev
->discov_off
, to
);
1219 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1220 new_settings(hdev
, cmd
->sk
);
1223 mgmt_pending_remove(cmd
);
1226 hci_dev_unlock(hdev
);
1229 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1232 struct mgmt_cp_set_discoverable
*cp
= data
;
1233 struct mgmt_pending_cmd
*cmd
;
1237 BT_DBG("request for %s", hdev
->name
);
1239 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1240 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1241 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1242 MGMT_STATUS_REJECTED
);
1244 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1245 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1246 MGMT_STATUS_INVALID_PARAMS
);
1248 timeout
= __le16_to_cpu(cp
->timeout
);
1250 /* Disabling discoverable requires that no timeout is set,
1251 * and enabling limited discoverable requires a timeout.
1253 if ((cp
->val
== 0x00 && timeout
> 0) ||
1254 (cp
->val
== 0x02 && timeout
== 0))
1255 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1256 MGMT_STATUS_INVALID_PARAMS
);
1260 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1261 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1262 MGMT_STATUS_NOT_POWERED
);
1266 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1267 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1268 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1273 if (!hci_dev_test_flag(hdev
, HCI_CONNECTABLE
)) {
1274 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1275 MGMT_STATUS_REJECTED
);
1279 if (!hdev_is_powered(hdev
)) {
1280 bool changed
= false;
1282 /* Setting limited discoverable when powered off is
1283 * not a valid operation since it requires a timeout
1284 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1286 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
)) {
1287 hci_dev_change_flag(hdev
, HCI_DISCOVERABLE
);
1291 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1296 err
= new_settings(hdev
, sk
);
1301 /* If the current mode is the same, then just update the timeout
1302 * value with the new value. And if only the timeout gets updated,
1303 * then no need for any HCI transactions.
1305 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1306 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
,
1307 HCI_LIMITED_DISCOVERABLE
)) {
1308 cancel_delayed_work(&hdev
->discov_off
);
1309 hdev
->discov_timeout
= timeout
;
1311 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1312 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1313 queue_delayed_work(hdev
->req_workqueue
,
1314 &hdev
->discov_off
, to
);
1317 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1321 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1327 /* Cancel any potential discoverable timeout that might be
1328 * still active and store new timeout value. The arming of
1329 * the timeout happens in the complete handler.
1331 cancel_delayed_work(&hdev
->discov_off
);
1332 hdev
->discov_timeout
= timeout
;
1335 hci_dev_set_flag(hdev
, HCI_DISCOVERABLE
);
1337 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1339 /* Limited discoverable mode */
1340 if (cp
->val
== 0x02)
1341 hci_dev_set_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1343 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1345 queue_work(hdev
->req_workqueue
, &hdev
->discoverable_update
);
1349 hci_dev_unlock(hdev
);
1353 void mgmt_set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1355 struct mgmt_pending_cmd
*cmd
;
1357 BT_DBG("status 0x%02x", status
);
1361 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1366 u8 mgmt_err
= mgmt_status(status
);
1367 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1371 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1372 new_settings(hdev
, cmd
->sk
);
1375 mgmt_pending_remove(cmd
);
1378 hci_dev_unlock(hdev
);
1381 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1382 struct sock
*sk
, u8 val
)
1384 bool changed
= false;
1387 if (!!val
!= hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
1391 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1393 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1394 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1397 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1402 hci_req_update_scan(hdev
);
1403 hci_update_background_scan(hdev
);
1404 return new_settings(hdev
, sk
);
1410 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1413 struct mgmt_mode
*cp
= data
;
1414 struct mgmt_pending_cmd
*cmd
;
1417 BT_DBG("request for %s", hdev
->name
);
1419 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1420 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1421 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1422 MGMT_STATUS_REJECTED
);
1424 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1425 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1426 MGMT_STATUS_INVALID_PARAMS
);
1430 if (!hdev_is_powered(hdev
)) {
1431 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1435 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1436 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1437 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1442 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1449 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1451 if (hdev
->discov_timeout
> 0)
1452 cancel_delayed_work(&hdev
->discov_off
);
1454 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1455 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1456 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1459 queue_work(hdev
->req_workqueue
, &hdev
->connectable_update
);
1463 hci_dev_unlock(hdev
);
1467 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1470 struct mgmt_mode
*cp
= data
;
1474 BT_DBG("request for %s", hdev
->name
);
1476 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1477 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
1478 MGMT_STATUS_INVALID_PARAMS
);
1483 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_BONDABLE
);
1485 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_BONDABLE
);
1487 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
1492 /* In limited privacy mode the change of bondable mode
1493 * may affect the local advertising address.
1495 if (hdev_is_powered(hdev
) &&
1496 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
1497 hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1498 hci_dev_test_flag(hdev
, HCI_LIMITED_PRIVACY
))
1499 queue_work(hdev
->req_workqueue
,
1500 &hdev
->discoverable_update
);
1502 err
= new_settings(hdev
, sk
);
1506 hci_dev_unlock(hdev
);
1510 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1513 struct mgmt_mode
*cp
= data
;
1514 struct mgmt_pending_cmd
*cmd
;
1518 BT_DBG("request for %s", hdev
->name
);
1520 status
= mgmt_bredr_support(hdev
);
1522 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1525 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1526 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1527 MGMT_STATUS_INVALID_PARAMS
);
1531 if (!hdev_is_powered(hdev
)) {
1532 bool changed
= false;
1534 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
1535 hci_dev_change_flag(hdev
, HCI_LINK_SECURITY
);
1539 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1544 err
= new_settings(hdev
, sk
);
1549 if (pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1550 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1557 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1558 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1562 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1568 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1570 mgmt_pending_remove(cmd
);
1575 hci_dev_unlock(hdev
);
1579 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1581 struct mgmt_mode
*cp
= data
;
1582 struct mgmt_pending_cmd
*cmd
;
1586 BT_DBG("request for %s", hdev
->name
);
1588 status
= mgmt_bredr_support(hdev
);
1590 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1592 if (!lmp_ssp_capable(hdev
))
1593 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1594 MGMT_STATUS_NOT_SUPPORTED
);
1596 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1597 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1598 MGMT_STATUS_INVALID_PARAMS
);
1602 if (!hdev_is_powered(hdev
)) {
1606 changed
= !hci_dev_test_and_set_flag(hdev
,
1609 changed
= hci_dev_test_and_clear_flag(hdev
,
1612 changed
= hci_dev_test_and_clear_flag(hdev
,
1615 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
1618 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1623 err
= new_settings(hdev
, sk
);
1628 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
1629 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1634 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
1635 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1639 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
1645 if (!cp
->val
&& hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
1646 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
1647 sizeof(cp
->val
), &cp
->val
);
1649 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
1651 mgmt_pending_remove(cmd
);
1656 hci_dev_unlock(hdev
);
1660 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1662 struct mgmt_mode
*cp
= data
;
1667 BT_DBG("request for %s", hdev
->name
);
1669 status
= mgmt_bredr_support(hdev
);
1671 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
1673 if (!lmp_ssp_capable(hdev
))
1674 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1675 MGMT_STATUS_NOT_SUPPORTED
);
1677 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
1678 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1679 MGMT_STATUS_REJECTED
);
1681 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1682 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1683 MGMT_STATUS_INVALID_PARAMS
);
1687 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
1688 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1694 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_HS_ENABLED
);
1696 if (hdev_is_powered(hdev
)) {
1697 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1698 MGMT_STATUS_REJECTED
);
1702 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_HS_ENABLED
);
1705 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
1710 err
= new_settings(hdev
, sk
);
1713 hci_dev_unlock(hdev
);
1717 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1719 struct cmd_lookup match
= { NULL
, hdev
};
1724 u8 mgmt_err
= mgmt_status(status
);
1726 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
1731 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
1733 new_settings(hdev
, match
.sk
);
1738 /* Make sure the controller has a good default for
1739 * advertising data. Restrict the update to when LE
1740 * has actually been enabled. During power on, the
1741 * update in powered_update_hci will take care of it.
1743 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
1744 struct hci_request req
;
1746 hci_req_init(&req
, hdev
);
1747 __hci_req_update_adv_data(&req
, 0x00);
1748 __hci_req_update_scan_rsp_data(&req
, 0x00);
1749 hci_req_run(&req
, NULL
);
1750 hci_update_background_scan(hdev
);
1754 hci_dev_unlock(hdev
);
1757 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1759 struct mgmt_mode
*cp
= data
;
1760 struct hci_cp_write_le_host_supported hci_cp
;
1761 struct mgmt_pending_cmd
*cmd
;
1762 struct hci_request req
;
1766 BT_DBG("request for %s", hdev
->name
);
1768 if (!lmp_le_capable(hdev
))
1769 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1770 MGMT_STATUS_NOT_SUPPORTED
);
1772 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1773 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1774 MGMT_STATUS_INVALID_PARAMS
);
1776 /* Bluetooth single mode LE only controllers or dual-mode
1777 * controllers configured as LE only devices, do not allow
1778 * switching LE off. These have either LE enabled explicitly
1779 * or BR/EDR has been previously switched off.
1781 * When trying to enable an already enabled LE, then gracefully
1782 * send a positive response. Trying to disable it however will
1783 * result into rejection.
1785 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1786 if (cp
->val
== 0x01)
1787 return send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1789 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1790 MGMT_STATUS_REJECTED
);
1796 enabled
= lmp_host_le_capable(hdev
);
1799 hci_req_clear_adv_instance(hdev
, NULL
, NULL
, 0x00, true);
1801 if (!hdev_is_powered(hdev
) || val
== enabled
) {
1802 bool changed
= false;
1804 if (val
!= hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
1805 hci_dev_change_flag(hdev
, HCI_LE_ENABLED
);
1809 if (!val
&& hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
1810 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
1814 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1819 err
= new_settings(hdev
, sk
);
1824 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
1825 pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
1826 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1831 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
1837 hci_req_init(&req
, hdev
);
1839 memset(&hci_cp
, 0, sizeof(hci_cp
));
1843 hci_cp
.simul
= 0x00;
1845 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1846 __hci_req_disable_advertising(&req
);
1849 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
1852 err
= hci_req_run(&req
, le_enable_complete
);
1854 mgmt_pending_remove(cmd
);
1857 hci_dev_unlock(hdev
);
1861 /* This is a helper function to test for pending mgmt commands that can
1862 * cause CoD or EIR HCI commands. We can only allow one such pending
1863 * mgmt command at a time since otherwise we cannot easily track what
1864 * the current values are, will be, and based on that calculate if a new
1865 * HCI command needs to be sent and if yes with what value.
1867 static bool pending_eir_or_class(struct hci_dev
*hdev
)
1869 struct mgmt_pending_cmd
*cmd
;
1871 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
1872 switch (cmd
->opcode
) {
1873 case MGMT_OP_ADD_UUID
:
1874 case MGMT_OP_REMOVE_UUID
:
1875 case MGMT_OP_SET_DEV_CLASS
:
1876 case MGMT_OP_SET_POWERED
:
1884 static const u8 bluetooth_base_uuid
[] = {
1885 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1886 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1889 static u8
get_uuid_size(const u8
*uuid
)
1893 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
1896 val
= get_unaligned_le32(&uuid
[12]);
1903 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
1905 struct mgmt_pending_cmd
*cmd
;
1909 cmd
= pending_find(mgmt_op
, hdev
);
1913 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
1914 mgmt_status(status
), hdev
->dev_class
, 3);
1916 mgmt_pending_remove(cmd
);
1919 hci_dev_unlock(hdev
);
1922 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1924 BT_DBG("status 0x%02x", status
);
1926 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
1929 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1931 struct mgmt_cp_add_uuid
*cp
= data
;
1932 struct mgmt_pending_cmd
*cmd
;
1933 struct hci_request req
;
1934 struct bt_uuid
*uuid
;
1937 BT_DBG("request for %s", hdev
->name
);
1941 if (pending_eir_or_class(hdev
)) {
1942 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
1947 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
1953 memcpy(uuid
->uuid
, cp
->uuid
, 16);
1954 uuid
->svc_hint
= cp
->svc_hint
;
1955 uuid
->size
= get_uuid_size(cp
->uuid
);
1957 list_add_tail(&uuid
->list
, &hdev
->uuids
);
1959 hci_req_init(&req
, hdev
);
1961 __hci_req_update_class(&req
);
1962 __hci_req_update_eir(&req
);
1964 err
= hci_req_run(&req
, add_uuid_complete
);
1966 if (err
!= -ENODATA
)
1969 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
1970 hdev
->dev_class
, 3);
1974 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
1983 hci_dev_unlock(hdev
);
1987 static bool enable_service_cache(struct hci_dev
*hdev
)
1989 if (!hdev_is_powered(hdev
))
1992 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
1993 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2001 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2003 BT_DBG("status 0x%02x", status
);
2005 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2008 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2011 struct mgmt_cp_remove_uuid
*cp
= data
;
2012 struct mgmt_pending_cmd
*cmd
;
2013 struct bt_uuid
*match
, *tmp
;
2014 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2015 struct hci_request req
;
2018 BT_DBG("request for %s", hdev
->name
);
2022 if (pending_eir_or_class(hdev
)) {
2023 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2028 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2029 hci_uuids_clear(hdev
);
2031 if (enable_service_cache(hdev
)) {
2032 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2033 MGMT_OP_REMOVE_UUID
,
2034 0, hdev
->dev_class
, 3);
2043 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2044 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2047 list_del(&match
->list
);
2053 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2054 MGMT_STATUS_INVALID_PARAMS
);
2059 hci_req_init(&req
, hdev
);
2061 __hci_req_update_class(&req
);
2062 __hci_req_update_eir(&req
);
2064 err
= hci_req_run(&req
, remove_uuid_complete
);
2066 if (err
!= -ENODATA
)
2069 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2070 hdev
->dev_class
, 3);
2074 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2083 hci_dev_unlock(hdev
);
2087 static void set_class_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2089 BT_DBG("status 0x%02x", status
);
2091 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2094 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2097 struct mgmt_cp_set_dev_class
*cp
= data
;
2098 struct mgmt_pending_cmd
*cmd
;
2099 struct hci_request req
;
2102 BT_DBG("request for %s", hdev
->name
);
2104 if (!lmp_bredr_capable(hdev
))
2105 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2106 MGMT_STATUS_NOT_SUPPORTED
);
2110 if (pending_eir_or_class(hdev
)) {
2111 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2116 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2117 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2118 MGMT_STATUS_INVALID_PARAMS
);
2122 hdev
->major_class
= cp
->major
;
2123 hdev
->minor_class
= cp
->minor
;
2125 if (!hdev_is_powered(hdev
)) {
2126 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2127 hdev
->dev_class
, 3);
2131 hci_req_init(&req
, hdev
);
2133 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
)) {
2134 hci_dev_unlock(hdev
);
2135 cancel_delayed_work_sync(&hdev
->service_cache
);
2137 __hci_req_update_eir(&req
);
2140 __hci_req_update_class(&req
);
2142 err
= hci_req_run(&req
, set_class_complete
);
2144 if (err
!= -ENODATA
)
2147 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2148 hdev
->dev_class
, 3);
2152 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2161 hci_dev_unlock(hdev
);
2165 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2168 struct mgmt_cp_load_link_keys
*cp
= data
;
2169 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2170 sizeof(struct mgmt_link_key_info
));
2171 u16 key_count
, expected_len
;
2175 BT_DBG("request for %s", hdev
->name
);
2177 if (!lmp_bredr_capable(hdev
))
2178 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2179 MGMT_STATUS_NOT_SUPPORTED
);
2181 key_count
= __le16_to_cpu(cp
->key_count
);
2182 if (key_count
> max_key_count
) {
2183 BT_ERR("load_link_keys: too big key_count value %u",
2185 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2186 MGMT_STATUS_INVALID_PARAMS
);
2189 expected_len
= sizeof(*cp
) + key_count
*
2190 sizeof(struct mgmt_link_key_info
);
2191 if (expected_len
!= len
) {
2192 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2194 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2195 MGMT_STATUS_INVALID_PARAMS
);
2198 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2199 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2200 MGMT_STATUS_INVALID_PARAMS
);
2202 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2205 for (i
= 0; i
< key_count
; i
++) {
2206 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2208 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2209 return mgmt_cmd_status(sk
, hdev
->id
,
2210 MGMT_OP_LOAD_LINK_KEYS
,
2211 MGMT_STATUS_INVALID_PARAMS
);
2216 hci_link_keys_clear(hdev
);
2219 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
2221 changed
= hci_dev_test_and_clear_flag(hdev
,
2222 HCI_KEEP_DEBUG_KEYS
);
2225 new_settings(hdev
, NULL
);
2227 for (i
= 0; i
< key_count
; i
++) {
2228 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2230 /* Always ignore debug keys and require a new pairing if
2231 * the user wants to use them.
2233 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2236 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2237 key
->type
, key
->pin_len
, NULL
);
2240 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2242 hci_dev_unlock(hdev
);
2247 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2248 u8 addr_type
, struct sock
*skip_sk
)
2250 struct mgmt_ev_device_unpaired ev
;
2252 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2253 ev
.addr
.type
= addr_type
;
2255 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2259 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2262 struct mgmt_cp_unpair_device
*cp
= data
;
2263 struct mgmt_rp_unpair_device rp
;
2264 struct hci_conn_params
*params
;
2265 struct mgmt_pending_cmd
*cmd
;
2266 struct hci_conn
*conn
;
2270 memset(&rp
, 0, sizeof(rp
));
2271 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2272 rp
.addr
.type
= cp
->addr
.type
;
2274 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2275 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2276 MGMT_STATUS_INVALID_PARAMS
,
2279 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2280 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2281 MGMT_STATUS_INVALID_PARAMS
,
2286 if (!hdev_is_powered(hdev
)) {
2287 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2288 MGMT_STATUS_NOT_POWERED
, &rp
,
2293 if (cp
->addr
.type
== BDADDR_BREDR
) {
2294 /* If disconnection is requested, then look up the
2295 * connection. If the remote device is connected, it
2296 * will be later used to terminate the link.
2298 * Setting it to NULL explicitly will cause no
2299 * termination of the link.
2302 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2307 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2309 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2310 MGMT_OP_UNPAIR_DEVICE
,
2311 MGMT_STATUS_NOT_PAIRED
, &rp
,
2319 /* LE address type */
2320 addr_type
= le_addr_type(cp
->addr
.type
);
2322 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2324 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2326 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2327 MGMT_STATUS_NOT_PAIRED
, &rp
,
2332 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2334 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2338 /* Abort any ongoing SMP pairing */
2339 smp_cancel_pairing(conn
);
2341 /* Defer clearing up the connection parameters until closing to
2342 * give a chance of keeping them if a repairing happens.
2344 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2346 /* Disable auto-connection parameters if present */
2347 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2349 if (params
->explicit_connect
)
2350 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
2352 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2355 /* If disconnection is not requested, then clear the connection
2356 * variable so that the link is not terminated.
2358 if (!cp
->disconnect
)
2362 /* If the connection variable is set, then termination of the
2363 * link is requested.
2366 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2368 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2372 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2379 cmd
->cmd_complete
= addr_cmd_complete
;
2381 err
= hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2383 mgmt_pending_remove(cmd
);
2386 hci_dev_unlock(hdev
);
2390 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2393 struct mgmt_cp_disconnect
*cp
= data
;
2394 struct mgmt_rp_disconnect rp
;
2395 struct mgmt_pending_cmd
*cmd
;
2396 struct hci_conn
*conn
;
2401 memset(&rp
, 0, sizeof(rp
));
2402 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2403 rp
.addr
.type
= cp
->addr
.type
;
2405 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2406 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2407 MGMT_STATUS_INVALID_PARAMS
,
2412 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2413 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2414 MGMT_STATUS_NOT_POWERED
, &rp
,
2419 if (pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2420 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2421 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2425 if (cp
->addr
.type
== BDADDR_BREDR
)
2426 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2429 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
,
2430 le_addr_type(cp
->addr
.type
));
2432 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2433 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2434 MGMT_STATUS_NOT_CONNECTED
, &rp
,
2439 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2445 cmd
->cmd_complete
= generic_cmd_complete
;
2447 err
= hci_disconnect(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2449 mgmt_pending_remove(cmd
);
2452 hci_dev_unlock(hdev
);
2456 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2458 switch (link_type
) {
2460 switch (addr_type
) {
2461 case ADDR_LE_DEV_PUBLIC
:
2462 return BDADDR_LE_PUBLIC
;
2465 /* Fallback to LE Random address type */
2466 return BDADDR_LE_RANDOM
;
2470 /* Fallback to BR/EDR type */
2471 return BDADDR_BREDR
;
2475 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2478 struct mgmt_rp_get_connections
*rp
;
2488 if (!hdev_is_powered(hdev
)) {
2489 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2490 MGMT_STATUS_NOT_POWERED
);
2495 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2496 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2500 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2501 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2508 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2509 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2511 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2512 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2513 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2518 rp
->conn_count
= cpu_to_le16(i
);
2520 /* Recalculate length in case of filtered SCO connections, etc */
2521 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2523 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2529 hci_dev_unlock(hdev
);
2533 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2534 struct mgmt_cp_pin_code_neg_reply
*cp
)
2536 struct mgmt_pending_cmd
*cmd
;
2539 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2544 cmd
->cmd_complete
= addr_cmd_complete
;
2546 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2547 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2549 mgmt_pending_remove(cmd
);
2554 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2557 struct hci_conn
*conn
;
2558 struct mgmt_cp_pin_code_reply
*cp
= data
;
2559 struct hci_cp_pin_code_reply reply
;
2560 struct mgmt_pending_cmd
*cmd
;
2567 if (!hdev_is_powered(hdev
)) {
2568 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2569 MGMT_STATUS_NOT_POWERED
);
2573 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2575 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2576 MGMT_STATUS_NOT_CONNECTED
);
2580 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2581 struct mgmt_cp_pin_code_neg_reply ncp
;
2583 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2585 BT_ERR("PIN code is not 16 bytes long");
2587 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2589 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2590 MGMT_STATUS_INVALID_PARAMS
);
2595 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2601 cmd
->cmd_complete
= addr_cmd_complete
;
2603 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2604 reply
.pin_len
= cp
->pin_len
;
2605 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2607 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2609 mgmt_pending_remove(cmd
);
2612 hci_dev_unlock(hdev
);
2616 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2619 struct mgmt_cp_set_io_capability
*cp
= data
;
2623 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
2624 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
2625 MGMT_STATUS_INVALID_PARAMS
);
2629 hdev
->io_capability
= cp
->io_capability
;
2631 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
2632 hdev
->io_capability
);
2634 hci_dev_unlock(hdev
);
2636 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0,
2640 static struct mgmt_pending_cmd
*find_pairing(struct hci_conn
*conn
)
2642 struct hci_dev
*hdev
= conn
->hdev
;
2643 struct mgmt_pending_cmd
*cmd
;
2645 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2646 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
2649 if (cmd
->user_data
!= conn
)
2658 static int pairing_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
2660 struct mgmt_rp_pair_device rp
;
2661 struct hci_conn
*conn
= cmd
->user_data
;
2664 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2665 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
2667 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
,
2668 status
, &rp
, sizeof(rp
));
2670 /* So we don't get further callbacks for this connection */
2671 conn
->connect_cfm_cb
= NULL
;
2672 conn
->security_cfm_cb
= NULL
;
2673 conn
->disconn_cfm_cb
= NULL
;
2675 hci_conn_drop(conn
);
2677 /* The device is paired so there is no need to remove
2678 * its connection parameters anymore.
2680 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2687 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
2689 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
2690 struct mgmt_pending_cmd
*cmd
;
2692 cmd
= find_pairing(conn
);
2694 cmd
->cmd_complete(cmd
, status
);
2695 mgmt_pending_remove(cmd
);
2699 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2701 struct mgmt_pending_cmd
*cmd
;
2703 BT_DBG("status %u", status
);
2705 cmd
= find_pairing(conn
);
2707 BT_DBG("Unable to find a pending command");
2711 cmd
->cmd_complete(cmd
, mgmt_status(status
));
2712 mgmt_pending_remove(cmd
);
2715 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2717 struct mgmt_pending_cmd
*cmd
;
2719 BT_DBG("status %u", status
);
2724 cmd
= find_pairing(conn
);
2726 BT_DBG("Unable to find a pending command");
2730 cmd
->cmd_complete(cmd
, mgmt_status(status
));
2731 mgmt_pending_remove(cmd
);
2734 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2737 struct mgmt_cp_pair_device
*cp
= data
;
2738 struct mgmt_rp_pair_device rp
;
2739 struct mgmt_pending_cmd
*cmd
;
2740 u8 sec_level
, auth_type
;
2741 struct hci_conn
*conn
;
2746 memset(&rp
, 0, sizeof(rp
));
2747 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2748 rp
.addr
.type
= cp
->addr
.type
;
2750 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2751 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2752 MGMT_STATUS_INVALID_PARAMS
,
2755 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
2756 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2757 MGMT_STATUS_INVALID_PARAMS
,
2762 if (!hdev_is_powered(hdev
)) {
2763 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2764 MGMT_STATUS_NOT_POWERED
, &rp
,
2769 if (hci_bdaddr_is_paired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
)) {
2770 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2771 MGMT_STATUS_ALREADY_PAIRED
, &rp
,
2776 sec_level
= BT_SECURITY_MEDIUM
;
2777 auth_type
= HCI_AT_DEDICATED_BONDING
;
2779 if (cp
->addr
.type
== BDADDR_BREDR
) {
2780 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
2783 u8 addr_type
= le_addr_type(cp
->addr
.type
);
2784 struct hci_conn_params
*p
;
2786 /* When pairing a new device, it is expected to remember
2787 * this device for future connections. Adding the connection
2788 * parameter information ahead of time allows tracking
2789 * of the slave preferred values and will speed up any
2790 * further connection establishment.
2792 * If connection parameters already exist, then they
2793 * will be kept and this function does nothing.
2795 p
= hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2797 if (p
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
)
2798 p
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2800 conn
= hci_connect_le_scan(hdev
, &cp
->addr
.bdaddr
,
2801 addr_type
, sec_level
,
2802 HCI_LE_CONN_TIMEOUT
);
2808 if (PTR_ERR(conn
) == -EBUSY
)
2809 status
= MGMT_STATUS_BUSY
;
2810 else if (PTR_ERR(conn
) == -EOPNOTSUPP
)
2811 status
= MGMT_STATUS_NOT_SUPPORTED
;
2812 else if (PTR_ERR(conn
) == -ECONNREFUSED
)
2813 status
= MGMT_STATUS_REJECTED
;
2815 status
= MGMT_STATUS_CONNECT_FAILED
;
2817 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2818 status
, &rp
, sizeof(rp
));
2822 if (conn
->connect_cfm_cb
) {
2823 hci_conn_drop(conn
);
2824 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2825 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2829 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
2832 hci_conn_drop(conn
);
2836 cmd
->cmd_complete
= pairing_complete
;
2838 /* For LE, just connecting isn't a proof that the pairing finished */
2839 if (cp
->addr
.type
== BDADDR_BREDR
) {
2840 conn
->connect_cfm_cb
= pairing_complete_cb
;
2841 conn
->security_cfm_cb
= pairing_complete_cb
;
2842 conn
->disconn_cfm_cb
= pairing_complete_cb
;
2844 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
2845 conn
->security_cfm_cb
= le_pairing_complete_cb
;
2846 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
2849 conn
->io_capability
= cp
->io_cap
;
2850 cmd
->user_data
= hci_conn_get(conn
);
2852 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
2853 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
2854 cmd
->cmd_complete(cmd
, 0);
2855 mgmt_pending_remove(cmd
);
2861 hci_dev_unlock(hdev
);
2865 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2868 struct mgmt_addr_info
*addr
= data
;
2869 struct mgmt_pending_cmd
*cmd
;
2870 struct hci_conn
*conn
;
2877 if (!hdev_is_powered(hdev
)) {
2878 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2879 MGMT_STATUS_NOT_POWERED
);
2883 cmd
= pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
2885 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2886 MGMT_STATUS_INVALID_PARAMS
);
2890 conn
= cmd
->user_data
;
2892 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
2893 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
2894 MGMT_STATUS_INVALID_PARAMS
);
2898 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
2899 mgmt_pending_remove(cmd
);
2901 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
2902 addr
, sizeof(*addr
));
2904 hci_dev_unlock(hdev
);
2908 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
2909 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
2910 u16 hci_op
, __le32 passkey
)
2912 struct mgmt_pending_cmd
*cmd
;
2913 struct hci_conn
*conn
;
2918 if (!hdev_is_powered(hdev
)) {
2919 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2920 MGMT_STATUS_NOT_POWERED
, addr
,
2925 if (addr
->type
== BDADDR_BREDR
)
2926 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
2928 conn
= hci_conn_hash_lookup_le(hdev
, &addr
->bdaddr
,
2929 le_addr_type(addr
->type
));
2932 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2933 MGMT_STATUS_NOT_CONNECTED
, addr
,
2938 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
2939 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
2941 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2942 MGMT_STATUS_SUCCESS
, addr
,
2945 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
2946 MGMT_STATUS_FAILED
, addr
,
2952 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
2958 cmd
->cmd_complete
= addr_cmd_complete
;
2960 /* Continue with pairing via HCI */
2961 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
2962 struct hci_cp_user_passkey_reply cp
;
2964 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
2965 cp
.passkey
= passkey
;
2966 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
2968 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
2972 mgmt_pending_remove(cmd
);
2975 hci_dev_unlock(hdev
);
2979 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2980 void *data
, u16 len
)
2982 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
2986 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
2987 MGMT_OP_PIN_CODE_NEG_REPLY
,
2988 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
2991 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2994 struct mgmt_cp_user_confirm_reply
*cp
= data
;
2998 if (len
!= sizeof(*cp
))
2999 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3000 MGMT_STATUS_INVALID_PARAMS
);
3002 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3003 MGMT_OP_USER_CONFIRM_REPLY
,
3004 HCI_OP_USER_CONFIRM_REPLY
, 0);
3007 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3008 void *data
, u16 len
)
3010 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3014 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3015 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3016 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3019 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3022 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3026 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3027 MGMT_OP_USER_PASSKEY_REPLY
,
3028 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3031 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3032 void *data
, u16 len
)
3034 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3038 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3039 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3040 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3043 static void adv_expire(struct hci_dev
*hdev
, u32 flags
)
3045 struct adv_info
*adv_instance
;
3046 struct hci_request req
;
3049 adv_instance
= hci_find_adv_instance(hdev
, hdev
->cur_adv_instance
);
3053 /* stop if current instance doesn't need to be changed */
3054 if (!(adv_instance
->flags
& flags
))
3057 cancel_adv_timeout(hdev
);
3059 adv_instance
= hci_get_next_instance(hdev
, adv_instance
->instance
);
3063 hci_req_init(&req
, hdev
);
3064 err
= __hci_req_schedule_adv_instance(&req
, adv_instance
->instance
,
3069 hci_req_run(&req
, NULL
);
3072 static void set_name_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3074 struct mgmt_cp_set_local_name
*cp
;
3075 struct mgmt_pending_cmd
*cmd
;
3077 BT_DBG("status 0x%02x", status
);
3081 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3088 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3089 mgmt_status(status
));
3091 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3094 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3095 adv_expire(hdev
, MGMT_ADV_FLAG_LOCAL_NAME
);
3098 mgmt_pending_remove(cmd
);
3101 hci_dev_unlock(hdev
);
3104 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3107 struct mgmt_cp_set_local_name
*cp
= data
;
3108 struct mgmt_pending_cmd
*cmd
;
3109 struct hci_request req
;
3116 /* If the old values are the same as the new ones just return a
3117 * direct command complete event.
3119 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3120 !memcmp(hdev
->short_name
, cp
->short_name
,
3121 sizeof(hdev
->short_name
))) {
3122 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3127 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3129 if (!hdev_is_powered(hdev
)) {
3130 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3132 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3137 err
= mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
,
3138 len
, HCI_MGMT_LOCAL_NAME_EVENTS
, sk
);
3139 ext_info_changed(hdev
, sk
);
3144 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3150 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3152 hci_req_init(&req
, hdev
);
3154 if (lmp_bredr_capable(hdev
)) {
3155 __hci_req_update_name(&req
);
3156 __hci_req_update_eir(&req
);
3159 /* The name is stored in the scan response data and so
3160 * no need to udpate the advertising data here.
3162 if (lmp_le_capable(hdev
) && hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
3163 __hci_req_update_scan_rsp_data(&req
, hdev
->cur_adv_instance
);
3165 err
= hci_req_run(&req
, set_name_complete
);
3167 mgmt_pending_remove(cmd
);
3170 hci_dev_unlock(hdev
);
3174 static int set_appearance(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3177 struct mgmt_cp_set_appearance
*cp
= data
;
3183 if (!lmp_le_capable(hdev
))
3184 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_APPEARANCE
,
3185 MGMT_STATUS_NOT_SUPPORTED
);
3187 apperance
= le16_to_cpu(cp
->appearance
);
3191 if (hdev
->appearance
!= apperance
) {
3192 hdev
->appearance
= apperance
;
3194 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3195 adv_expire(hdev
, MGMT_ADV_FLAG_APPEARANCE
);
3197 ext_info_changed(hdev
, sk
);
3200 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_APPEARANCE
, 0, NULL
,
3203 hci_dev_unlock(hdev
);
3208 static void read_local_oob_data_complete(struct hci_dev
*hdev
, u8 status
,
3209 u16 opcode
, struct sk_buff
*skb
)
3211 struct mgmt_rp_read_local_oob_data mgmt_rp
;
3212 size_t rp_size
= sizeof(mgmt_rp
);
3213 struct mgmt_pending_cmd
*cmd
;
3215 BT_DBG("%s status %u", hdev
->name
, status
);
3217 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
3221 if (status
|| !skb
) {
3222 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3223 status
? mgmt_status(status
) : MGMT_STATUS_FAILED
);
3227 memset(&mgmt_rp
, 0, sizeof(mgmt_rp
));
3229 if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
3230 struct hci_rp_read_local_oob_data
*rp
= (void *) skb
->data
;
3232 if (skb
->len
< sizeof(*rp
)) {
3233 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3234 MGMT_OP_READ_LOCAL_OOB_DATA
,
3235 MGMT_STATUS_FAILED
);
3239 memcpy(mgmt_rp
.hash192
, rp
->hash
, sizeof(rp
->hash
));
3240 memcpy(mgmt_rp
.rand192
, rp
->rand
, sizeof(rp
->rand
));
3242 rp_size
-= sizeof(mgmt_rp
.hash256
) + sizeof(mgmt_rp
.rand256
);
3244 struct hci_rp_read_local_oob_ext_data
*rp
= (void *) skb
->data
;
3246 if (skb
->len
< sizeof(*rp
)) {
3247 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3248 MGMT_OP_READ_LOCAL_OOB_DATA
,
3249 MGMT_STATUS_FAILED
);
3253 memcpy(mgmt_rp
.hash192
, rp
->hash192
, sizeof(rp
->hash192
));
3254 memcpy(mgmt_rp
.rand192
, rp
->rand192
, sizeof(rp
->rand192
));
3256 memcpy(mgmt_rp
.hash256
, rp
->hash256
, sizeof(rp
->hash256
));
3257 memcpy(mgmt_rp
.rand256
, rp
->rand256
, sizeof(rp
->rand256
));
3260 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3261 MGMT_STATUS_SUCCESS
, &mgmt_rp
, rp_size
);
3264 mgmt_pending_remove(cmd
);
3267 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3268 void *data
, u16 data_len
)
3270 struct mgmt_pending_cmd
*cmd
;
3271 struct hci_request req
;
3274 BT_DBG("%s", hdev
->name
);
3278 if (!hdev_is_powered(hdev
)) {
3279 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3280 MGMT_STATUS_NOT_POWERED
);
3284 if (!lmp_ssp_capable(hdev
)) {
3285 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3286 MGMT_STATUS_NOT_SUPPORTED
);
3290 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3291 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3296 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3302 hci_req_init(&req
, hdev
);
3304 if (bredr_sc_enabled(hdev
))
3305 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
3307 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3309 err
= hci_req_run_skb(&req
, read_local_oob_data_complete
);
3311 mgmt_pending_remove(cmd
);
3314 hci_dev_unlock(hdev
);
3318 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3319 void *data
, u16 len
)
3321 struct mgmt_addr_info
*addr
= data
;
3324 BT_DBG("%s ", hdev
->name
);
3326 if (!bdaddr_type_is_valid(addr
->type
))
3327 return mgmt_cmd_complete(sk
, hdev
->id
,
3328 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3329 MGMT_STATUS_INVALID_PARAMS
,
3330 addr
, sizeof(*addr
));
3334 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3335 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3338 if (cp
->addr
.type
!= BDADDR_BREDR
) {
3339 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3340 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3341 MGMT_STATUS_INVALID_PARAMS
,
3342 &cp
->addr
, sizeof(cp
->addr
));
3346 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3347 cp
->addr
.type
, cp
->hash
,
3348 cp
->rand
, NULL
, NULL
);
3350 status
= MGMT_STATUS_FAILED
;
3352 status
= MGMT_STATUS_SUCCESS
;
3354 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3355 MGMT_OP_ADD_REMOTE_OOB_DATA
, status
,
3356 &cp
->addr
, sizeof(cp
->addr
));
3357 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3358 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3359 u8
*rand192
, *hash192
, *rand256
, *hash256
;
3362 if (bdaddr_type_is_le(cp
->addr
.type
)) {
3363 /* Enforce zero-valued 192-bit parameters as
3364 * long as legacy SMP OOB isn't implemented.
3366 if (memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
3367 memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
3368 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3369 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3370 MGMT_STATUS_INVALID_PARAMS
,
3371 addr
, sizeof(*addr
));
3378 /* In case one of the P-192 values is set to zero,
3379 * then just disable OOB data for P-192.
3381 if (!memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
3382 !memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
3386 rand192
= cp
->rand192
;
3387 hash192
= cp
->hash192
;
3391 /* In case one of the P-256 values is set to zero, then just
3392 * disable OOB data for P-256.
3394 if (!memcmp(cp
->rand256
, ZERO_KEY
, 16) ||
3395 !memcmp(cp
->hash256
, ZERO_KEY
, 16)) {
3399 rand256
= cp
->rand256
;
3400 hash256
= cp
->hash256
;
3403 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3404 cp
->addr
.type
, hash192
, rand192
,
3407 status
= MGMT_STATUS_FAILED
;
3409 status
= MGMT_STATUS_SUCCESS
;
3411 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3412 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3413 status
, &cp
->addr
, sizeof(cp
->addr
));
3415 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3416 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3417 MGMT_STATUS_INVALID_PARAMS
);
3421 hci_dev_unlock(hdev
);
3425 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3426 void *data
, u16 len
)
3428 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3432 BT_DBG("%s", hdev
->name
);
3434 if (cp
->addr
.type
!= BDADDR_BREDR
)
3435 return mgmt_cmd_complete(sk
, hdev
->id
,
3436 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3437 MGMT_STATUS_INVALID_PARAMS
,
3438 &cp
->addr
, sizeof(cp
->addr
));
3442 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
3443 hci_remote_oob_data_clear(hdev
);
3444 status
= MGMT_STATUS_SUCCESS
;
3448 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3450 status
= MGMT_STATUS_INVALID_PARAMS
;
3452 status
= MGMT_STATUS_SUCCESS
;
3455 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3456 status
, &cp
->addr
, sizeof(cp
->addr
));
3458 hci_dev_unlock(hdev
);
3462 void mgmt_start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3464 struct mgmt_pending_cmd
*cmd
;
3466 BT_DBG("status %d", status
);
3470 cmd
= pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3472 cmd
= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
3475 cmd
= pending_find(MGMT_OP_START_LIMITED_DISCOVERY
, hdev
);
3478 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3479 mgmt_pending_remove(cmd
);
3482 hci_dev_unlock(hdev
);
3485 static bool discovery_type_is_valid(struct hci_dev
*hdev
, uint8_t type
,
3486 uint8_t *mgmt_status
)
3489 case DISCOV_TYPE_LE
:
3490 *mgmt_status
= mgmt_le_support(hdev
);
3494 case DISCOV_TYPE_INTERLEAVED
:
3495 *mgmt_status
= mgmt_le_support(hdev
);
3498 /* Intentional fall-through */
3499 case DISCOV_TYPE_BREDR
:
3500 *mgmt_status
= mgmt_bredr_support(hdev
);
3505 *mgmt_status
= MGMT_STATUS_INVALID_PARAMS
;
3512 static int start_discovery_internal(struct sock
*sk
, struct hci_dev
*hdev
,
3513 u16 op
, void *data
, u16 len
)
3515 struct mgmt_cp_start_discovery
*cp
= data
;
3516 struct mgmt_pending_cmd
*cmd
;
3520 BT_DBG("%s", hdev
->name
);
3524 if (!hdev_is_powered(hdev
)) {
3525 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
,
3526 MGMT_STATUS_NOT_POWERED
,
3527 &cp
->type
, sizeof(cp
->type
));
3531 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3532 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
3533 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, MGMT_STATUS_BUSY
,
3534 &cp
->type
, sizeof(cp
->type
));
3538 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
3539 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, status
,
3540 &cp
->type
, sizeof(cp
->type
));
3544 /* Clear the discovery filter first to free any previously
3545 * allocated memory for the UUID list.
3547 hci_discovery_filter_clear(hdev
);
3549 hdev
->discovery
.type
= cp
->type
;
3550 hdev
->discovery
.report_invalid_rssi
= false;
3551 if (op
== MGMT_OP_START_LIMITED_DISCOVERY
)
3552 hdev
->discovery
.limited
= true;
3554 hdev
->discovery
.limited
= false;
3556 cmd
= mgmt_pending_add(sk
, op
, hdev
, data
, len
);
3562 cmd
->cmd_complete
= generic_cmd_complete
;
3564 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3565 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
3569 hci_dev_unlock(hdev
);
3573 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3574 void *data
, u16 len
)
3576 return start_discovery_internal(sk
, hdev
, MGMT_OP_START_DISCOVERY
,
3580 static int start_limited_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3581 void *data
, u16 len
)
3583 return start_discovery_internal(sk
, hdev
,
3584 MGMT_OP_START_LIMITED_DISCOVERY
,
3588 static int service_discovery_cmd_complete(struct mgmt_pending_cmd
*cmd
,
3591 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
3595 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3596 void *data
, u16 len
)
3598 struct mgmt_cp_start_service_discovery
*cp
= data
;
3599 struct mgmt_pending_cmd
*cmd
;
3600 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
3601 u16 uuid_count
, expected_len
;
3605 BT_DBG("%s", hdev
->name
);
3609 if (!hdev_is_powered(hdev
)) {
3610 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3611 MGMT_OP_START_SERVICE_DISCOVERY
,
3612 MGMT_STATUS_NOT_POWERED
,
3613 &cp
->type
, sizeof(cp
->type
));
3617 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3618 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
3619 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3620 MGMT_OP_START_SERVICE_DISCOVERY
,
3621 MGMT_STATUS_BUSY
, &cp
->type
,
3626 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
3627 if (uuid_count
> max_uuid_count
) {
3628 BT_ERR("service_discovery: too big uuid_count value %u",
3630 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3631 MGMT_OP_START_SERVICE_DISCOVERY
,
3632 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
3637 expected_len
= sizeof(*cp
) + uuid_count
* 16;
3638 if (expected_len
!= len
) {
3639 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
3641 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3642 MGMT_OP_START_SERVICE_DISCOVERY
,
3643 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
3648 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
3649 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3650 MGMT_OP_START_SERVICE_DISCOVERY
,
3651 status
, &cp
->type
, sizeof(cp
->type
));
3655 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
3662 cmd
->cmd_complete
= service_discovery_cmd_complete
;
3664 /* Clear the discovery filter first to free any previously
3665 * allocated memory for the UUID list.
3667 hci_discovery_filter_clear(hdev
);
3669 hdev
->discovery
.result_filtering
= true;
3670 hdev
->discovery
.type
= cp
->type
;
3671 hdev
->discovery
.rssi
= cp
->rssi
;
3672 hdev
->discovery
.uuid_count
= uuid_count
;
3674 if (uuid_count
> 0) {
3675 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
3677 if (!hdev
->discovery
.uuids
) {
3678 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3679 MGMT_OP_START_SERVICE_DISCOVERY
,
3681 &cp
->type
, sizeof(cp
->type
));
3682 mgmt_pending_remove(cmd
);
3687 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3688 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
3692 hci_dev_unlock(hdev
);
3696 void mgmt_stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3698 struct mgmt_pending_cmd
*cmd
;
3700 BT_DBG("status %d", status
);
3704 cmd
= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
3706 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3707 mgmt_pending_remove(cmd
);
3710 hci_dev_unlock(hdev
);
3713 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3716 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
3717 struct mgmt_pending_cmd
*cmd
;
3720 BT_DBG("%s", hdev
->name
);
3724 if (!hci_discovery_active(hdev
)) {
3725 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3726 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
3727 sizeof(mgmt_cp
->type
));
3731 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
3732 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3733 MGMT_STATUS_INVALID_PARAMS
,
3734 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
3738 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
3744 cmd
->cmd_complete
= generic_cmd_complete
;
3746 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
3747 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
3751 hci_dev_unlock(hdev
);
3755 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3758 struct mgmt_cp_confirm_name
*cp
= data
;
3759 struct inquiry_entry
*e
;
3762 BT_DBG("%s", hdev
->name
);
3766 if (!hci_discovery_active(hdev
)) {
3767 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3768 MGMT_STATUS_FAILED
, &cp
->addr
,
3773 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
3775 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3776 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
3781 if (cp
->name_known
) {
3782 e
->name_state
= NAME_KNOWN
;
3785 e
->name_state
= NAME_NEEDED
;
3786 hci_inquiry_cache_update_resolve(hdev
, e
);
3789 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
3790 &cp
->addr
, sizeof(cp
->addr
));
3793 hci_dev_unlock(hdev
);
3797 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3800 struct mgmt_cp_block_device
*cp
= data
;
3804 BT_DBG("%s", hdev
->name
);
3806 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3807 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
3808 MGMT_STATUS_INVALID_PARAMS
,
3809 &cp
->addr
, sizeof(cp
->addr
));
3813 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
3816 status
= MGMT_STATUS_FAILED
;
3820 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3822 status
= MGMT_STATUS_SUCCESS
;
3825 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
3826 &cp
->addr
, sizeof(cp
->addr
));
3828 hci_dev_unlock(hdev
);
3833 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3836 struct mgmt_cp_unblock_device
*cp
= data
;
3840 BT_DBG("%s", hdev
->name
);
3842 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3843 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
3844 MGMT_STATUS_INVALID_PARAMS
,
3845 &cp
->addr
, sizeof(cp
->addr
));
3849 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
3852 status
= MGMT_STATUS_INVALID_PARAMS
;
3856 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3858 status
= MGMT_STATUS_SUCCESS
;
3861 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
3862 &cp
->addr
, sizeof(cp
->addr
));
3864 hci_dev_unlock(hdev
);
3869 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3872 struct mgmt_cp_set_device_id
*cp
= data
;
3873 struct hci_request req
;
3877 BT_DBG("%s", hdev
->name
);
3879 source
= __le16_to_cpu(cp
->source
);
3881 if (source
> 0x0002)
3882 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
3883 MGMT_STATUS_INVALID_PARAMS
);
3887 hdev
->devid_source
= source
;
3888 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
3889 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
3890 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
3892 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0,
3895 hci_req_init(&req
, hdev
);
3896 __hci_req_update_eir(&req
);
3897 hci_req_run(&req
, NULL
);
3899 hci_dev_unlock(hdev
);
3904 static void enable_advertising_instance(struct hci_dev
*hdev
, u8 status
,
3907 BT_DBG("status %d", status
);
3910 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
,
3913 struct cmd_lookup match
= { NULL
, hdev
};
3914 struct hci_request req
;
3916 struct adv_info
*adv_instance
;
3922 u8 mgmt_err
= mgmt_status(status
);
3924 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
3925 cmd_status_rsp
, &mgmt_err
);
3929 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3930 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
3932 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
3934 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
3937 new_settings(hdev
, match
.sk
);
3942 /* If "Set Advertising" was just disabled and instance advertising was
3943 * set up earlier, then re-enable multi-instance advertising.
3945 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
3946 list_empty(&hdev
->adv_instances
))
3949 instance
= hdev
->cur_adv_instance
;
3951 adv_instance
= list_first_entry_or_null(&hdev
->adv_instances
,
3952 struct adv_info
, list
);
3956 instance
= adv_instance
->instance
;
3959 hci_req_init(&req
, hdev
);
3961 err
= __hci_req_schedule_adv_instance(&req
, instance
, true);
3964 err
= hci_req_run(&req
, enable_advertising_instance
);
3967 BT_ERR("Failed to re-configure advertising");
3970 hci_dev_unlock(hdev
);
3973 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3976 struct mgmt_mode
*cp
= data
;
3977 struct mgmt_pending_cmd
*cmd
;
3978 struct hci_request req
;
3982 BT_DBG("request for %s", hdev
->name
);
3984 status
= mgmt_le_support(hdev
);
3986 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3989 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
3990 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
3991 MGMT_STATUS_INVALID_PARAMS
);
3997 /* The following conditions are ones which mean that we should
3998 * not do any HCI communication but directly send a mgmt
3999 * response to user space (after toggling the flag if
4002 if (!hdev_is_powered(hdev
) ||
4003 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
4004 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
4005 hci_conn_num(hdev
, LE_LINK
) > 0 ||
4006 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4007 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
4011 hdev
->cur_adv_instance
= 0x00;
4012 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
4013 if (cp
->val
== 0x02)
4014 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4016 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4018 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
4019 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4022 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4027 err
= new_settings(hdev
, sk
);
4032 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4033 pending_find(MGMT_OP_SET_LE
, hdev
)) {
4034 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4039 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4045 hci_req_init(&req
, hdev
);
4047 if (cp
->val
== 0x02)
4048 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4050 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4052 cancel_adv_timeout(hdev
);
4055 /* Switch to instance "0" for the Set Advertising setting.
4056 * We cannot use update_[adv|scan_rsp]_data() here as the
4057 * HCI_ADVERTISING flag is not yet set.
4059 hdev
->cur_adv_instance
= 0x00;
4060 __hci_req_update_adv_data(&req
, 0x00);
4061 __hci_req_update_scan_rsp_data(&req
, 0x00);
4062 __hci_req_enable_advertising(&req
);
4064 __hci_req_disable_advertising(&req
);
4067 err
= hci_req_run(&req
, set_advertising_complete
);
4069 mgmt_pending_remove(cmd
);
4072 hci_dev_unlock(hdev
);
4076 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
4077 void *data
, u16 len
)
4079 struct mgmt_cp_set_static_address
*cp
= data
;
4082 BT_DBG("%s", hdev
->name
);
4084 if (!lmp_le_capable(hdev
))
4085 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4086 MGMT_STATUS_NOT_SUPPORTED
);
4088 if (hdev_is_powered(hdev
))
4089 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4090 MGMT_STATUS_REJECTED
);
4092 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
4093 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
4094 return mgmt_cmd_status(sk
, hdev
->id
,
4095 MGMT_OP_SET_STATIC_ADDRESS
,
4096 MGMT_STATUS_INVALID_PARAMS
);
4098 /* Two most significant bits shall be set */
4099 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4100 return mgmt_cmd_status(sk
, hdev
->id
,
4101 MGMT_OP_SET_STATIC_ADDRESS
,
4102 MGMT_STATUS_INVALID_PARAMS
);
4107 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
4109 err
= send_settings_rsp(sk
, MGMT_OP_SET_STATIC_ADDRESS
, hdev
);
4113 err
= new_settings(hdev
, sk
);
4116 hci_dev_unlock(hdev
);
4120 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4121 void *data
, u16 len
)
4123 struct mgmt_cp_set_scan_params
*cp
= data
;
4124 __u16 interval
, window
;
4127 BT_DBG("%s", hdev
->name
);
4129 if (!lmp_le_capable(hdev
))
4130 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4131 MGMT_STATUS_NOT_SUPPORTED
);
4133 interval
= __le16_to_cpu(cp
->interval
);
4135 if (interval
< 0x0004 || interval
> 0x4000)
4136 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4137 MGMT_STATUS_INVALID_PARAMS
);
4139 window
= __le16_to_cpu(cp
->window
);
4141 if (window
< 0x0004 || window
> 0x4000)
4142 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4143 MGMT_STATUS_INVALID_PARAMS
);
4145 if (window
> interval
)
4146 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4147 MGMT_STATUS_INVALID_PARAMS
);
4151 hdev
->le_scan_interval
= interval
;
4152 hdev
->le_scan_window
= window
;
4154 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0,
4157 /* If background scan is running, restart it so new parameters are
4160 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4161 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4162 struct hci_request req
;
4164 hci_req_init(&req
, hdev
);
4166 hci_req_add_le_scan_disable(&req
);
4167 hci_req_add_le_passive_scan(&req
);
4169 hci_req_run(&req
, NULL
);
4172 hci_dev_unlock(hdev
);
4177 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
,
4180 struct mgmt_pending_cmd
*cmd
;
4182 BT_DBG("status 0x%02x", status
);
4186 cmd
= pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4191 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4192 mgmt_status(status
));
4194 struct mgmt_mode
*cp
= cmd
->param
;
4197 hci_dev_set_flag(hdev
, HCI_FAST_CONNECTABLE
);
4199 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
4201 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4202 new_settings(hdev
, cmd
->sk
);
4205 mgmt_pending_remove(cmd
);
4208 hci_dev_unlock(hdev
);
4211 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4212 void *data
, u16 len
)
4214 struct mgmt_mode
*cp
= data
;
4215 struct mgmt_pending_cmd
*cmd
;
4216 struct hci_request req
;
4219 BT_DBG("%s", hdev
->name
);
4221 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
4222 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4223 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4224 MGMT_STATUS_NOT_SUPPORTED
);
4226 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4227 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4228 MGMT_STATUS_INVALID_PARAMS
);
4232 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4233 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4238 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
)) {
4239 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4244 if (!hdev_is_powered(hdev
)) {
4245 hci_dev_change_flag(hdev
, HCI_FAST_CONNECTABLE
);
4246 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4248 new_settings(hdev
, sk
);
4252 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4259 hci_req_init(&req
, hdev
);
4261 __hci_req_write_fast_connectable(&req
, cp
->val
);
4263 err
= hci_req_run(&req
, fast_connectable_complete
);
4265 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4266 MGMT_STATUS_FAILED
);
4267 mgmt_pending_remove(cmd
);
4271 hci_dev_unlock(hdev
);
4276 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
4278 struct mgmt_pending_cmd
*cmd
;
4280 BT_DBG("status 0x%02x", status
);
4284 cmd
= pending_find(MGMT_OP_SET_BREDR
, hdev
);
4289 u8 mgmt_err
= mgmt_status(status
);
4291 /* We need to restore the flag if related HCI commands
4294 hci_dev_clear_flag(hdev
, HCI_BREDR_ENABLED
);
4296 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4298 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4299 new_settings(hdev
, cmd
->sk
);
4302 mgmt_pending_remove(cmd
);
4305 hci_dev_unlock(hdev
);
4308 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4310 struct mgmt_mode
*cp
= data
;
4311 struct mgmt_pending_cmd
*cmd
;
4312 struct hci_request req
;
4315 BT_DBG("request for %s", hdev
->name
);
4317 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4318 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4319 MGMT_STATUS_NOT_SUPPORTED
);
4321 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
4322 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4323 MGMT_STATUS_REJECTED
);
4325 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4326 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4327 MGMT_STATUS_INVALID_PARAMS
);
4331 if (cp
->val
== hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
4332 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4336 if (!hdev_is_powered(hdev
)) {
4338 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
4339 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
4340 hci_dev_clear_flag(hdev
, HCI_LINK_SECURITY
);
4341 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
4342 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
4345 hci_dev_change_flag(hdev
, HCI_BREDR_ENABLED
);
4347 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4351 err
= new_settings(hdev
, sk
);
4355 /* Reject disabling when powered on */
4357 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4358 MGMT_STATUS_REJECTED
);
4361 /* When configuring a dual-mode controller to operate
4362 * with LE only and using a static address, then switching
4363 * BR/EDR back on is not allowed.
4365 * Dual-mode controllers shall operate with the public
4366 * address as its identity address for BR/EDR and LE. So
4367 * reject the attempt to create an invalid configuration.
4369 * The same restrictions applies when secure connections
4370 * has been enabled. For BR/EDR this is a controller feature
4371 * while for LE it is a host stack feature. This means that
4372 * switching BR/EDR back on when secure connections has been
4373 * enabled is not a supported transaction.
4375 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
4376 (bacmp(&hdev
->static_addr
, BDADDR_ANY
) ||
4377 hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))) {
4378 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4379 MGMT_STATUS_REJECTED
);
4384 if (pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4385 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4390 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4396 /* We need to flip the bit already here so that
4397 * hci_req_update_adv_data generates the correct flags.
4399 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
4401 hci_req_init(&req
, hdev
);
4403 __hci_req_write_fast_connectable(&req
, false);
4404 __hci_req_update_scan(&req
);
4406 /* Since only the advertising data flags will change, there
4407 * is no need to update the scan response data.
4409 __hci_req_update_adv_data(&req
, hdev
->cur_adv_instance
);
4411 err
= hci_req_run(&req
, set_bredr_complete
);
4413 mgmt_pending_remove(cmd
);
4416 hci_dev_unlock(hdev
);
4420 static void sc_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
4422 struct mgmt_pending_cmd
*cmd
;
4423 struct mgmt_mode
*cp
;
4425 BT_DBG("%s status %u", hdev
->name
, status
);
4429 cmd
= pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
);
4434 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
4435 mgmt_status(status
));
4443 hci_dev_clear_flag(hdev
, HCI_SC_ENABLED
);
4444 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
4447 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
4448 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
4451 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
4452 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
4456 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4457 new_settings(hdev
, cmd
->sk
);
4460 mgmt_pending_remove(cmd
);
4462 hci_dev_unlock(hdev
);
4465 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4466 void *data
, u16 len
)
4468 struct mgmt_mode
*cp
= data
;
4469 struct mgmt_pending_cmd
*cmd
;
4470 struct hci_request req
;
4474 BT_DBG("request for %s", hdev
->name
);
4476 if (!lmp_sc_capable(hdev
) &&
4477 !hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
4478 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4479 MGMT_STATUS_NOT_SUPPORTED
);
4481 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
4482 lmp_sc_capable(hdev
) &&
4483 !hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
4484 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4485 MGMT_STATUS_REJECTED
);
4487 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4488 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4489 MGMT_STATUS_INVALID_PARAMS
);
4493 if (!hdev_is_powered(hdev
) || !lmp_sc_capable(hdev
) ||
4494 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
4498 changed
= !hci_dev_test_and_set_flag(hdev
,
4500 if (cp
->val
== 0x02)
4501 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
4503 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
4505 changed
= hci_dev_test_and_clear_flag(hdev
,
4507 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
4510 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4515 err
= new_settings(hdev
, sk
);
4520 if (pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4521 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4528 if (val
== hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
4529 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
4530 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4534 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4540 hci_req_init(&req
, hdev
);
4541 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4542 err
= hci_req_run(&req
, sc_enable_complete
);
4544 mgmt_pending_remove(cmd
);
4549 hci_dev_unlock(hdev
);
4553 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4554 void *data
, u16 len
)
4556 struct mgmt_mode
*cp
= data
;
4557 bool changed
, use_changed
;
4560 BT_DBG("request for %s", hdev
->name
);
4562 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4563 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
4564 MGMT_STATUS_INVALID_PARAMS
);
4569 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
4571 changed
= hci_dev_test_and_clear_flag(hdev
,
4572 HCI_KEEP_DEBUG_KEYS
);
4574 if (cp
->val
== 0x02)
4575 use_changed
= !hci_dev_test_and_set_flag(hdev
,
4576 HCI_USE_DEBUG_KEYS
);
4578 use_changed
= hci_dev_test_and_clear_flag(hdev
,
4579 HCI_USE_DEBUG_KEYS
);
4581 if (hdev_is_powered(hdev
) && use_changed
&&
4582 hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
4583 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
4584 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
4585 sizeof(mode
), &mode
);
4588 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
4593 err
= new_settings(hdev
, sk
);
4596 hci_dev_unlock(hdev
);
4600 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4603 struct mgmt_cp_set_privacy
*cp
= cp_data
;
4607 BT_DBG("request for %s", hdev
->name
);
4609 if (!lmp_le_capable(hdev
))
4610 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4611 MGMT_STATUS_NOT_SUPPORTED
);
4613 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01 && cp
->privacy
!= 0x02)
4614 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4615 MGMT_STATUS_INVALID_PARAMS
);
4617 if (hdev_is_powered(hdev
))
4618 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4619 MGMT_STATUS_REJECTED
);
4623 /* If user space supports this command it is also expected to
4624 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4626 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
4629 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_PRIVACY
);
4630 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
4631 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
4632 if (cp
->privacy
== 0x02)
4633 hci_dev_set_flag(hdev
, HCI_LIMITED_PRIVACY
);
4635 hci_dev_clear_flag(hdev
, HCI_LIMITED_PRIVACY
);
4637 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_PRIVACY
);
4638 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
4639 hci_dev_clear_flag(hdev
, HCI_RPA_EXPIRED
);
4640 hci_dev_clear_flag(hdev
, HCI_LIMITED_PRIVACY
);
4643 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
4648 err
= new_settings(hdev
, sk
);
4651 hci_dev_unlock(hdev
);
4655 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
4657 switch (irk
->addr
.type
) {
4658 case BDADDR_LE_PUBLIC
:
4661 case BDADDR_LE_RANDOM
:
4662 /* Two most significant bits shall be set */
4663 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4671 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4674 struct mgmt_cp_load_irks
*cp
= cp_data
;
4675 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
4676 sizeof(struct mgmt_irk_info
));
4677 u16 irk_count
, expected_len
;
4680 BT_DBG("request for %s", hdev
->name
);
4682 if (!lmp_le_capable(hdev
))
4683 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4684 MGMT_STATUS_NOT_SUPPORTED
);
4686 irk_count
= __le16_to_cpu(cp
->irk_count
);
4687 if (irk_count
> max_irk_count
) {
4688 BT_ERR("load_irks: too big irk_count value %u", irk_count
);
4689 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4690 MGMT_STATUS_INVALID_PARAMS
);
4693 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4694 if (expected_len
!= len
) {
4695 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4697 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4698 MGMT_STATUS_INVALID_PARAMS
);
4701 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4703 for (i
= 0; i
< irk_count
; i
++) {
4704 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4706 if (!irk_is_valid(key
))
4707 return mgmt_cmd_status(sk
, hdev
->id
,
4709 MGMT_STATUS_INVALID_PARAMS
);
4714 hci_smp_irks_clear(hdev
);
4716 for (i
= 0; i
< irk_count
; i
++) {
4717 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4719 hci_add_irk(hdev
, &irk
->addr
.bdaddr
,
4720 le_addr_type(irk
->addr
.type
), irk
->val
,
4724 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
4726 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4728 hci_dev_unlock(hdev
);
4733 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
4735 if (key
->master
!= 0x00 && key
->master
!= 0x01)
4738 switch (key
->addr
.type
) {
4739 case BDADDR_LE_PUBLIC
:
4742 case BDADDR_LE_RANDOM
:
4743 /* Two most significant bits shall be set */
4744 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4752 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4753 void *cp_data
, u16 len
)
4755 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
4756 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
4757 sizeof(struct mgmt_ltk_info
));
4758 u16 key_count
, expected_len
;
4761 BT_DBG("request for %s", hdev
->name
);
4763 if (!lmp_le_capable(hdev
))
4764 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4765 MGMT_STATUS_NOT_SUPPORTED
);
4767 key_count
= __le16_to_cpu(cp
->key_count
);
4768 if (key_count
> max_key_count
) {
4769 BT_ERR("load_ltks: too big key_count value %u", key_count
);
4770 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4771 MGMT_STATUS_INVALID_PARAMS
);
4774 expected_len
= sizeof(*cp
) + key_count
*
4775 sizeof(struct mgmt_ltk_info
);
4776 if (expected_len
!= len
) {
4777 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4779 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4780 MGMT_STATUS_INVALID_PARAMS
);
4783 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
4785 for (i
= 0; i
< key_count
; i
++) {
4786 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4788 if (!ltk_is_valid(key
))
4789 return mgmt_cmd_status(sk
, hdev
->id
,
4790 MGMT_OP_LOAD_LONG_TERM_KEYS
,
4791 MGMT_STATUS_INVALID_PARAMS
);
4796 hci_smp_ltks_clear(hdev
);
4798 for (i
= 0; i
< key_count
; i
++) {
4799 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4800 u8 type
, authenticated
;
4802 switch (key
->type
) {
4803 case MGMT_LTK_UNAUTHENTICATED
:
4804 authenticated
= 0x00;
4805 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
4807 case MGMT_LTK_AUTHENTICATED
:
4808 authenticated
= 0x01;
4809 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
4811 case MGMT_LTK_P256_UNAUTH
:
4812 authenticated
= 0x00;
4813 type
= SMP_LTK_P256
;
4815 case MGMT_LTK_P256_AUTH
:
4816 authenticated
= 0x01;
4817 type
= SMP_LTK_P256
;
4819 case MGMT_LTK_P256_DEBUG
:
4820 authenticated
= 0x00;
4821 type
= SMP_LTK_P256_DEBUG
;
4826 hci_add_ltk(hdev
, &key
->addr
.bdaddr
,
4827 le_addr_type(key
->addr
.type
), type
, authenticated
,
4828 key
->val
, key
->enc_size
, key
->ediv
, key
->rand
);
4831 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
4834 hci_dev_unlock(hdev
);
4839 static int conn_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
4841 struct hci_conn
*conn
= cmd
->user_data
;
4842 struct mgmt_rp_get_conn_info rp
;
4845 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
4847 if (status
== MGMT_STATUS_SUCCESS
) {
4848 rp
.rssi
= conn
->rssi
;
4849 rp
.tx_power
= conn
->tx_power
;
4850 rp
.max_tx_power
= conn
->max_tx_power
;
4852 rp
.rssi
= HCI_RSSI_INVALID
;
4853 rp
.tx_power
= HCI_TX_POWER_INVALID
;
4854 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
4857 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
4858 status
, &rp
, sizeof(rp
));
4860 hci_conn_drop(conn
);
4866 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
,
4869 struct hci_cp_read_rssi
*cp
;
4870 struct mgmt_pending_cmd
*cmd
;
4871 struct hci_conn
*conn
;
4875 BT_DBG("status 0x%02x", hci_status
);
4879 /* Commands sent in request are either Read RSSI or Read Transmit Power
4880 * Level so we check which one was last sent to retrieve connection
4881 * handle. Both commands have handle as first parameter so it's safe to
4882 * cast data on the same command struct.
4884 * First command sent is always Read RSSI and we fail only if it fails.
4885 * In other case we simply override error to indicate success as we
4886 * already remembered if TX power value is actually valid.
4888 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
4890 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
4891 status
= MGMT_STATUS_SUCCESS
;
4893 status
= mgmt_status(hci_status
);
4897 BT_ERR("invalid sent_cmd in conn_info response");
4901 handle
= __le16_to_cpu(cp
->handle
);
4902 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4904 BT_ERR("unknown handle (%d) in conn_info response", handle
);
4908 cmd
= pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
4912 cmd
->cmd_complete(cmd
, status
);
4913 mgmt_pending_remove(cmd
);
4916 hci_dev_unlock(hdev
);
4919 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4922 struct mgmt_cp_get_conn_info
*cp
= data
;
4923 struct mgmt_rp_get_conn_info rp
;
4924 struct hci_conn
*conn
;
4925 unsigned long conn_info_age
;
4928 BT_DBG("%s", hdev
->name
);
4930 memset(&rp
, 0, sizeof(rp
));
4931 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4932 rp
.addr
.type
= cp
->addr
.type
;
4934 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4935 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4936 MGMT_STATUS_INVALID_PARAMS
,
4941 if (!hdev_is_powered(hdev
)) {
4942 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4943 MGMT_STATUS_NOT_POWERED
, &rp
,
4948 if (cp
->addr
.type
== BDADDR_BREDR
)
4949 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
4952 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
4954 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
4955 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4956 MGMT_STATUS_NOT_CONNECTED
, &rp
,
4961 if (pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
)) {
4962 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4963 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
4967 /* To avoid client trying to guess when to poll again for information we
4968 * calculate conn info age as random value between min/max set in hdev.
4970 conn_info_age
= hdev
->conn_info_min_age
+
4971 prandom_u32_max(hdev
->conn_info_max_age
-
4972 hdev
->conn_info_min_age
);
4974 /* Query controller to refresh cached values if they are too old or were
4977 if (time_after(jiffies
, conn
->conn_info_timestamp
+
4978 msecs_to_jiffies(conn_info_age
)) ||
4979 !conn
->conn_info_timestamp
) {
4980 struct hci_request req
;
4981 struct hci_cp_read_tx_power req_txp_cp
;
4982 struct hci_cp_read_rssi req_rssi_cp
;
4983 struct mgmt_pending_cmd
*cmd
;
4985 hci_req_init(&req
, hdev
);
4986 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
4987 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
4990 /* For LE links TX power does not change thus we don't need to
4991 * query for it once value is known.
4993 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
4994 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
4995 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
4996 req_txp_cp
.type
= 0x00;
4997 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
4998 sizeof(req_txp_cp
), &req_txp_cp
);
5001 /* Max TX power needs to be read only once per connection */
5002 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
5003 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5004 req_txp_cp
.type
= 0x01;
5005 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5006 sizeof(req_txp_cp
), &req_txp_cp
);
5009 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5013 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5020 hci_conn_hold(conn
);
5021 cmd
->user_data
= hci_conn_get(conn
);
5022 cmd
->cmd_complete
= conn_info_cmd_complete
;
5024 conn
->conn_info_timestamp
= jiffies
;
5026 /* Cache is valid, just reply with values cached in hci_conn */
5027 rp
.rssi
= conn
->rssi
;
5028 rp
.tx_power
= conn
->tx_power
;
5029 rp
.max_tx_power
= conn
->max_tx_power
;
5031 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5032 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5036 hci_dev_unlock(hdev
);
5040 static int clock_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
5042 struct hci_conn
*conn
= cmd
->user_data
;
5043 struct mgmt_rp_get_clock_info rp
;
5044 struct hci_dev
*hdev
;
5047 memset(&rp
, 0, sizeof(rp
));
5048 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
5053 hdev
= hci_dev_get(cmd
->index
);
5055 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
5060 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
5061 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
5065 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, &rp
,
5069 hci_conn_drop(conn
);
5076 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5078 struct hci_cp_read_clock
*hci_cp
;
5079 struct mgmt_pending_cmd
*cmd
;
5080 struct hci_conn
*conn
;
5082 BT_DBG("%s status %u", hdev
->name
, status
);
5086 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
5090 if (hci_cp
->which
) {
5091 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
5092 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5097 cmd
= pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
5101 cmd
->cmd_complete(cmd
, mgmt_status(status
));
5102 mgmt_pending_remove(cmd
);
5105 hci_dev_unlock(hdev
);
5108 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5111 struct mgmt_cp_get_clock_info
*cp
= data
;
5112 struct mgmt_rp_get_clock_info rp
;
5113 struct hci_cp_read_clock hci_cp
;
5114 struct mgmt_pending_cmd
*cmd
;
5115 struct hci_request req
;
5116 struct hci_conn
*conn
;
5119 BT_DBG("%s", hdev
->name
);
5121 memset(&rp
, 0, sizeof(rp
));
5122 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5123 rp
.addr
.type
= cp
->addr
.type
;
5125 if (cp
->addr
.type
!= BDADDR_BREDR
)
5126 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5127 MGMT_STATUS_INVALID_PARAMS
,
5132 if (!hdev_is_powered(hdev
)) {
5133 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5134 MGMT_STATUS_NOT_POWERED
, &rp
,
5139 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5140 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5142 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5143 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5144 MGMT_OP_GET_CLOCK_INFO
,
5145 MGMT_STATUS_NOT_CONNECTED
,
5153 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
5159 cmd
->cmd_complete
= clock_info_cmd_complete
;
5161 hci_req_init(&req
, hdev
);
5163 memset(&hci_cp
, 0, sizeof(hci_cp
));
5164 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5167 hci_conn_hold(conn
);
5168 cmd
->user_data
= hci_conn_get(conn
);
5170 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
5171 hci_cp
.which
= 0x01; /* Piconet clock */
5172 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5175 err
= hci_req_run(&req
, get_clock_info_complete
);
5177 mgmt_pending_remove(cmd
);
5180 hci_dev_unlock(hdev
);
5184 static bool is_connected(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 type
)
5186 struct hci_conn
*conn
;
5188 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, addr
);
5192 if (conn
->dst_type
!= type
)
5195 if (conn
->state
!= BT_CONNECTED
)
5201 /* This function requires the caller holds hdev->lock */
5202 static int hci_conn_params_set(struct hci_dev
*hdev
, bdaddr_t
*addr
,
5203 u8 addr_type
, u8 auto_connect
)
5205 struct hci_conn_params
*params
;
5207 params
= hci_conn_params_add(hdev
, addr
, addr_type
);
5211 if (params
->auto_connect
== auto_connect
)
5214 list_del_init(¶ms
->action
);
5216 switch (auto_connect
) {
5217 case HCI_AUTO_CONN_DISABLED
:
5218 case HCI_AUTO_CONN_LINK_LOSS
:
5219 /* If auto connect is being disabled when we're trying to
5220 * connect to device, keep connecting.
5222 if (params
->explicit_connect
)
5223 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
5225 case HCI_AUTO_CONN_REPORT
:
5226 if (params
->explicit_connect
)
5227 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
5229 list_add(¶ms
->action
, &hdev
->pend_le_reports
);
5231 case HCI_AUTO_CONN_DIRECT
:
5232 case HCI_AUTO_CONN_ALWAYS
:
5233 if (!is_connected(hdev
, addr
, addr_type
))
5234 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
5238 params
->auto_connect
= auto_connect
;
5240 BT_DBG("addr %pMR (type %u) auto_connect %u", addr
, addr_type
,
5246 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
5247 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
5249 struct mgmt_ev_device_added ev
;
5251 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5252 ev
.addr
.type
= type
;
5255 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5258 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
5259 void *data
, u16 len
)
5261 struct mgmt_cp_add_device
*cp
= data
;
5262 u8 auto_conn
, addr_type
;
5265 BT_DBG("%s", hdev
->name
);
5267 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
5268 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
5269 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5270 MGMT_STATUS_INVALID_PARAMS
,
5271 &cp
->addr
, sizeof(cp
->addr
));
5273 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
5274 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5275 MGMT_STATUS_INVALID_PARAMS
,
5276 &cp
->addr
, sizeof(cp
->addr
));
5280 if (cp
->addr
.type
== BDADDR_BREDR
) {
5281 /* Only incoming connections action is supported for now */
5282 if (cp
->action
!= 0x01) {
5283 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5285 MGMT_STATUS_INVALID_PARAMS
,
5286 &cp
->addr
, sizeof(cp
->addr
));
5290 err
= hci_bdaddr_list_add(&hdev
->whitelist
, &cp
->addr
.bdaddr
,
5295 hci_req_update_scan(hdev
);
5300 addr_type
= le_addr_type(cp
->addr
.type
);
5302 if (cp
->action
== 0x02)
5303 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
5304 else if (cp
->action
== 0x01)
5305 auto_conn
= HCI_AUTO_CONN_DIRECT
;
5307 auto_conn
= HCI_AUTO_CONN_REPORT
;
5309 /* Kernel internally uses conn_params with resolvable private
5310 * address, but Add Device allows only identity addresses.
5311 * Make sure it is enforced before calling
5312 * hci_conn_params_lookup.
5314 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
5315 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5316 MGMT_STATUS_INVALID_PARAMS
,
5317 &cp
->addr
, sizeof(cp
->addr
));
5321 /* If the connection parameters don't exist for this device,
5322 * they will be created and configured with defaults.
5324 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
5326 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5327 MGMT_STATUS_FAILED
, &cp
->addr
,
5332 hci_update_background_scan(hdev
);
5335 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
5337 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5338 MGMT_STATUS_SUCCESS
, &cp
->addr
,
5342 hci_dev_unlock(hdev
);
5346 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
5347 bdaddr_t
*bdaddr
, u8 type
)
5349 struct mgmt_ev_device_removed ev
;
5351 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5352 ev
.addr
.type
= type
;
5354 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
5357 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
5358 void *data
, u16 len
)
5360 struct mgmt_cp_remove_device
*cp
= data
;
5363 BT_DBG("%s", hdev
->name
);
5367 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5368 struct hci_conn_params
*params
;
5371 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
5372 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5373 MGMT_OP_REMOVE_DEVICE
,
5374 MGMT_STATUS_INVALID_PARAMS
,
5375 &cp
->addr
, sizeof(cp
->addr
));
5379 if (cp
->addr
.type
== BDADDR_BREDR
) {
5380 err
= hci_bdaddr_list_del(&hdev
->whitelist
,
5384 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5385 MGMT_OP_REMOVE_DEVICE
,
5386 MGMT_STATUS_INVALID_PARAMS
,
5392 hci_req_update_scan(hdev
);
5394 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
5399 addr_type
= le_addr_type(cp
->addr
.type
);
5401 /* Kernel internally uses conn_params with resolvable private
5402 * address, but Remove Device allows only identity addresses.
5403 * Make sure it is enforced before calling
5404 * hci_conn_params_lookup.
5406 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
5407 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5408 MGMT_OP_REMOVE_DEVICE
,
5409 MGMT_STATUS_INVALID_PARAMS
,
5410 &cp
->addr
, sizeof(cp
->addr
));
5414 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
5417 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5418 MGMT_OP_REMOVE_DEVICE
,
5419 MGMT_STATUS_INVALID_PARAMS
,
5420 &cp
->addr
, sizeof(cp
->addr
));
5424 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
||
5425 params
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
) {
5426 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5427 MGMT_OP_REMOVE_DEVICE
,
5428 MGMT_STATUS_INVALID_PARAMS
,
5429 &cp
->addr
, sizeof(cp
->addr
));
5433 list_del(¶ms
->action
);
5434 list_del(¶ms
->list
);
5436 hci_update_background_scan(hdev
);
5438 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
5440 struct hci_conn_params
*p
, *tmp
;
5441 struct bdaddr_list
*b
, *btmp
;
5443 if (cp
->addr
.type
) {
5444 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5445 MGMT_OP_REMOVE_DEVICE
,
5446 MGMT_STATUS_INVALID_PARAMS
,
5447 &cp
->addr
, sizeof(cp
->addr
));
5451 list_for_each_entry_safe(b
, btmp
, &hdev
->whitelist
, list
) {
5452 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
5457 hci_req_update_scan(hdev
);
5459 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
5460 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
5462 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
5463 if (p
->explicit_connect
) {
5464 p
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
5467 list_del(&p
->action
);
5472 BT_DBG("All LE connection parameters were removed");
5474 hci_update_background_scan(hdev
);
5478 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5479 MGMT_STATUS_SUCCESS
, &cp
->addr
,
5482 hci_dev_unlock(hdev
);
/* Handler for the MGMT Load Connection Parameters command.
 *
 * Replaces the stored LE connection parameters (connection interval range,
 * slave latency, supervision timeout) for each peer listed in the request.
 * Individual entries with a non-LE address type or out-of-range values are
 * skipped with an error log rather than failing the whole command.
 *
 * Returns 0 (success reply sent) or a negative errno from the reply path.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest number of entries that can fit in a single 64 KiB
	 * management packet; used to reject bogus param_count values.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	/* Connection parameters only make sense on an LE capable controller */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the actual packet length */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop previously stored parameters for disabled connections
	 * before loading the new set.
	 */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			/* BR/EDR or unknown type: skip this entry */
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Range-check against the Core Specification limits */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
/* Handler for the MGMT Set External Configuration command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers that advertise the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk.  Only allowed while the controller is
 * powered off.  When the configured/unconfigured state actually changes,
 * the index is re-announced to management clients under its new identity.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Reconfiguration is only permitted while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* Only boolean 0x00/0x01 values are valid for config */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	/* Nothing more to do if the flag did not actually change */
	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		/* The controller switched between the configured and
		 * unconfigured index lists: remove it and re-add it so
		 * userspace sees the transition.
		 */
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for the MGMT Set Public Address command.
 *
 * Stores a public Bluetooth address to be programmed into a controller that
 * lacks a valid address of its own (via hdev->set_bdaddr).  Only permitted
 * while the controller is powered off, and BDADDR_ANY is rejected.
 * If setting the address completes the controller's configuration, the
 * index transitions from the unconfigured to the configured list.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The all-zero address is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Driver must provide a hook to actually program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Address was the last missing piece of configuration:
		 * re-announce the index as a configured controller and
		 * kick off the power-on sequence.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5680 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, u8 status
,
5681 u16 opcode
, struct sk_buff
*skb
)
5683 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
5684 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
5685 u8
*h192
, *r192
, *h256
, *r256
;
5686 struct mgmt_pending_cmd
*cmd
;
5690 BT_DBG("%s status %u", hdev
->name
, status
);
5692 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
);
5696 mgmt_cp
= cmd
->param
;
5699 status
= mgmt_status(status
);
5706 } else if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
5707 struct hci_rp_read_local_oob_data
*rp
;
5709 if (skb
->len
!= sizeof(*rp
)) {
5710 status
= MGMT_STATUS_FAILED
;
5713 status
= MGMT_STATUS_SUCCESS
;
5714 rp
= (void *)skb
->data
;
5716 eir_len
= 5 + 18 + 18;
5723 struct hci_rp_read_local_oob_ext_data
*rp
;
5725 if (skb
->len
!= sizeof(*rp
)) {
5726 status
= MGMT_STATUS_FAILED
;
5729 status
= MGMT_STATUS_SUCCESS
;
5730 rp
= (void *)skb
->data
;
5732 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
5733 eir_len
= 5 + 18 + 18;
5737 eir_len
= 5 + 18 + 18 + 18 + 18;
5747 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
5754 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
5755 hdev
->dev_class
, 3);
5758 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5759 EIR_SSP_HASH_C192
, h192
, 16);
5760 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5761 EIR_SSP_RAND_R192
, r192
, 16);
5765 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5766 EIR_SSP_HASH_C256
, h256
, 16);
5767 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
5768 EIR_SSP_RAND_R256
, r256
, 16);
5772 mgmt_rp
->type
= mgmt_cp
->type
;
5773 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
5775 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
5776 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
5777 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
5778 if (err
< 0 || status
)
5781 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
5783 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
5784 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
5785 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
5788 mgmt_pending_remove(cmd
);
5791 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
5792 struct mgmt_cp_read_local_oob_ext_data
*cp
)
5794 struct mgmt_pending_cmd
*cmd
;
5795 struct hci_request req
;
5798 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
5803 hci_req_init(&req
, hdev
);
5805 if (bredr_sc_enabled(hdev
))
5806 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
5808 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
5810 err
= hci_req_run_skb(&req
, read_local_oob_ext_data_complete
);
5812 mgmt_pending_remove(cmd
);
5819 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
5820 void *data
, u16 data_len
)
5822 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
5823 struct mgmt_rp_read_local_oob_ext_data
*rp
;
5826 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
5829 BT_DBG("%s", hdev
->name
);
5831 if (hdev_is_powered(hdev
)) {
5833 case BIT(BDADDR_BREDR
):
5834 status
= mgmt_bredr_support(hdev
);
5840 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
5841 status
= mgmt_le_support(hdev
);
5845 eir_len
= 9 + 3 + 18 + 18 + 3;
5848 status
= MGMT_STATUS_INVALID_PARAMS
;
5853 status
= MGMT_STATUS_NOT_POWERED
;
5857 rp_len
= sizeof(*rp
) + eir_len
;
5858 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
5869 case BIT(BDADDR_BREDR
):
5870 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
5871 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
5872 hci_dev_unlock(hdev
);
5876 status
= MGMT_STATUS_FAILED
;
5879 eir_len
= eir_append_data(rp
->eir
, eir_len
,
5881 hdev
->dev_class
, 3);
5884 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
5885 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
5886 smp_generate_oob(hdev
, hash
, rand
) < 0) {
5887 hci_dev_unlock(hdev
);
5888 status
= MGMT_STATUS_FAILED
;
5892 /* This should return the active RPA, but since the RPA
5893 * is only programmed on demand, it is really hard to fill
5894 * this in at the moment. For now disallow retrieving
5895 * local out-of-band data when privacy is in use.
5897 * Returning the identity address will not help here since
5898 * pairing happens before the identity resolving key is
5899 * known and thus the connection establishment happens
5900 * based on the RPA and not the identity address.
5902 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
5903 hci_dev_unlock(hdev
);
5904 status
= MGMT_STATUS_REJECTED
;
5908 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
5909 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
5910 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5911 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
5912 memcpy(addr
, &hdev
->static_addr
, 6);
5915 memcpy(addr
, &hdev
->bdaddr
, 6);
5919 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
5920 addr
, sizeof(addr
));
5922 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
5927 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
5928 &role
, sizeof(role
));
5930 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
5931 eir_len
= eir_append_data(rp
->eir
, eir_len
,
5933 hash
, sizeof(hash
));
5935 eir_len
= eir_append_data(rp
->eir
, eir_len
,
5937 rand
, sizeof(rand
));
5940 flags
= mgmt_get_adv_discov_flags(hdev
);
5942 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
5943 flags
|= LE_AD_NO_BREDR
;
5945 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
5946 &flags
, sizeof(flags
));
5950 hci_dev_unlock(hdev
);
5952 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
5954 status
= MGMT_STATUS_SUCCESS
;
5957 rp
->type
= cp
->type
;
5958 rp
->eir_len
= cpu_to_le16(eir_len
);
5960 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
5961 status
, rp
, sizeof(*rp
) + eir_len
);
5962 if (err
< 0 || status
)
5965 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
5966 rp
, sizeof(*rp
) + eir_len
,
5967 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
/* Return the bitmask of MGMT advertising flags this kernel/controller
 * combination supports.  TX power inclusion is only offered when the
 * controller reported a valid advertising TX power.
 */
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;

	/* TX power can only be advertised when the controller knows it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
		flags |= MGMT_ADV_FLAG_TX_POWER;

	return flags;
}
5992 static int read_adv_features(struct sock
*sk
, struct hci_dev
*hdev
,
5993 void *data
, u16 data_len
)
5995 struct mgmt_rp_read_adv_features
*rp
;
5998 struct adv_info
*adv_instance
;
5999 u32 supported_flags
;
6002 BT_DBG("%s", hdev
->name
);
6004 if (!lmp_le_capable(hdev
))
6005 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6006 MGMT_STATUS_REJECTED
);
6010 rp_len
= sizeof(*rp
) + hdev
->adv_instance_cnt
;
6011 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6013 hci_dev_unlock(hdev
);
6017 supported_flags
= get_supported_adv_flags(hdev
);
6019 rp
->supported_flags
= cpu_to_le32(supported_flags
);
6020 rp
->max_adv_data_len
= HCI_MAX_AD_LENGTH
;
6021 rp
->max_scan_rsp_len
= HCI_MAX_AD_LENGTH
;
6022 rp
->max_instances
= HCI_MAX_ADV_INSTANCES
;
6023 rp
->num_instances
= hdev
->adv_instance_cnt
;
6025 instance
= rp
->instance
;
6026 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
6027 *instance
= adv_instance
->instance
;
6031 hci_dev_unlock(hdev
);
6033 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6034 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
/* Compute the maximum TLV payload a client may supply for an advertising
 * instance, given which AD fields the kernel generates itself.
 *
 * Each kernel-managed field reduces the client budget: Flags and TX power
 * cost 3 bytes each in the advertising data; in the scan response the
 * local name costs at least 3 bytes and appearance 4 bytes.
 */
static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		/* at least 1 byte of name should fit in */
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= 3;

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}
6065 static bool flags_managed(u32 adv_flags
)
6067 return adv_flags
& (MGMT_ADV_FLAG_DISCOV
|
6068 MGMT_ADV_FLAG_LIMITED_DISCOV
|
6069 MGMT_ADV_FLAG_MANAGED_FLAGS
);
6072 static bool tx_power_managed(u32 adv_flags
)
6074 return adv_flags
& MGMT_ADV_FLAG_TX_POWER
;
6077 static bool name_managed(u32 adv_flags
)
6079 return adv_flags
& MGMT_ADV_FLAG_LOCAL_NAME
;
6082 static bool appearance_managed(u32 adv_flags
)
6084 return adv_flags
& MGMT_ADV_FLAG_APPEARANCE
;
/* Validate client-supplied advertising or scan-response TLV data.
 *
 * Rejects data that exceeds the per-instance budget, that contains AD
 * fields the kernel manages itself (Flags, TX power, local name,
 * appearance — depending on adv_flags), or whose field lengths run past
 * the end of the buffer.
 */
static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* Flags may only appear when the kernel doesn't manage them */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
6128 static void add_advertising_complete(struct hci_dev
*hdev
, u8 status
,
6131 struct mgmt_pending_cmd
*cmd
;
6132 struct mgmt_cp_add_advertising
*cp
;
6133 struct mgmt_rp_add_advertising rp
;
6134 struct adv_info
*adv_instance
, *n
;
6137 BT_DBG("status %d", status
);
6141 cmd
= pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
);
6143 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
6144 if (!adv_instance
->pending
)
6148 adv_instance
->pending
= false;
6152 instance
= adv_instance
->instance
;
6154 if (hdev
->cur_adv_instance
== instance
)
6155 cancel_adv_timeout(hdev
);
6157 hci_remove_adv_instance(hdev
, instance
);
6158 mgmt_advertising_removed(cmd
? cmd
->sk
: NULL
, hdev
, instance
);
6165 rp
.instance
= cp
->instance
;
6168 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6169 mgmt_status(status
));
6171 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6172 mgmt_status(status
), &rp
, sizeof(rp
));
6174 mgmt_pending_remove(cmd
);
6177 hci_dev_unlock(hdev
);
6180 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
6181 void *data
, u16 data_len
)
6183 struct mgmt_cp_add_advertising
*cp
= data
;
6184 struct mgmt_rp_add_advertising rp
;
6186 u32 supported_flags
;
6188 u16 timeout
, duration
;
6189 unsigned int prev_instance_cnt
= hdev
->adv_instance_cnt
;
6190 u8 schedule_instance
= 0;
6191 struct adv_info
*next_instance
;
6193 struct mgmt_pending_cmd
*cmd
;
6194 struct hci_request req
;
6196 BT_DBG("%s", hdev
->name
);
6198 status
= mgmt_le_support(hdev
);
6200 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6203 if (cp
->instance
< 1 || cp
->instance
> HCI_MAX_ADV_INSTANCES
)
6204 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6205 MGMT_STATUS_INVALID_PARAMS
);
6207 if (data_len
!= sizeof(*cp
) + cp
->adv_data_len
+ cp
->scan_rsp_len
)
6208 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6209 MGMT_STATUS_INVALID_PARAMS
);
6211 flags
= __le32_to_cpu(cp
->flags
);
6212 timeout
= __le16_to_cpu(cp
->timeout
);
6213 duration
= __le16_to_cpu(cp
->duration
);
6215 /* The current implementation only supports a subset of the specified
6218 supported_flags
= get_supported_adv_flags(hdev
);
6219 if (flags
& ~supported_flags
)
6220 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6221 MGMT_STATUS_INVALID_PARAMS
);
6225 if (timeout
&& !hdev_is_powered(hdev
)) {
6226 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6227 MGMT_STATUS_REJECTED
);
6231 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
6232 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
6233 pending_find(MGMT_OP_SET_LE
, hdev
)) {
6234 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6239 if (!tlv_data_is_valid(flags
, cp
->data
, cp
->adv_data_len
, true) ||
6240 !tlv_data_is_valid(flags
, cp
->data
+ cp
->adv_data_len
,
6241 cp
->scan_rsp_len
, false)) {
6242 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6243 MGMT_STATUS_INVALID_PARAMS
);
6247 err
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
6248 cp
->adv_data_len
, cp
->data
,
6250 cp
->data
+ cp
->adv_data_len
,
6253 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6254 MGMT_STATUS_FAILED
);
6258 /* Only trigger an advertising added event if a new instance was
6261 if (hdev
->adv_instance_cnt
> prev_instance_cnt
)
6262 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
6264 if (hdev
->cur_adv_instance
== cp
->instance
) {
6265 /* If the currently advertised instance is being changed then
6266 * cancel the current advertising and schedule the next
6267 * instance. If there is only one instance then the overridden
6268 * advertising data will be visible right away.
6270 cancel_adv_timeout(hdev
);
6272 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
6274 schedule_instance
= next_instance
->instance
;
6275 } else if (!hdev
->adv_instance_timeout
) {
6276 /* Immediately advertise the new instance if no other
6277 * instance is currently being advertised.
6279 schedule_instance
= cp
->instance
;
6282 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
6283 * there is no instance to be advertised then we have no HCI
6284 * communication to make. Simply return.
6286 if (!hdev_is_powered(hdev
) ||
6287 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
6288 !schedule_instance
) {
6289 rp
.instance
= cp
->instance
;
6290 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6291 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
6295 /* We're good to go, update advertising data, parameters, and start
6298 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
6305 hci_req_init(&req
, hdev
);
6307 err
= __hci_req_schedule_adv_instance(&req
, schedule_instance
, true);
6310 err
= hci_req_run(&req
, add_advertising_complete
);
6313 mgmt_pending_remove(cmd
);
6316 hci_dev_unlock(hdev
);
/* HCI request completion callback for Remove Advertising.
 *
 * Always replies with success to the pending management command: by the
 * time this runs, the advertising instance itself has been removed and a
 * failure status only means disabling advertising on the controller
 * failed.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handler for the MGMT Remove Advertising command.
 *
 * Removes one advertising instance (cp->instance != 0) or all instances
 * (cp->instance == 0).  If the removal requires talking to the controller
 * (commands were queued, device is powered, and userspace-controlled
 * advertising is not active) the reply is deferred to
 * remove_advertising_complete(); otherwise success is reported directly.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
/* Handler for the MGMT Get Advertising Size Information command.
 *
 * Purely informational: reports how many bytes of advertising data and
 * scan-response data the client may supply for an instance with the given
 * flags, after subtracting the fields the kernel generates itself.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based up to the supported maximum */
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(flags, false);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	return err;
}
6462 static const struct hci_mgmt_handler mgmt_handlers
[] = {
6463 { NULL
}, /* 0x0000 (no command) */
6464 { read_version
, MGMT_READ_VERSION_SIZE
,
6466 HCI_MGMT_UNTRUSTED
},
6467 { read_commands
, MGMT_READ_COMMANDS_SIZE
,
6469 HCI_MGMT_UNTRUSTED
},
6470 { read_index_list
, MGMT_READ_INDEX_LIST_SIZE
,
6472 HCI_MGMT_UNTRUSTED
},
6473 { read_controller_info
, MGMT_READ_INFO_SIZE
,
6474 HCI_MGMT_UNTRUSTED
},
6475 { set_powered
, MGMT_SETTING_SIZE
},
6476 { set_discoverable
, MGMT_SET_DISCOVERABLE_SIZE
},
6477 { set_connectable
, MGMT_SETTING_SIZE
},
6478 { set_fast_connectable
, MGMT_SETTING_SIZE
},
6479 { set_bondable
, MGMT_SETTING_SIZE
},
6480 { set_link_security
, MGMT_SETTING_SIZE
},
6481 { set_ssp
, MGMT_SETTING_SIZE
},
6482 { set_hs
, MGMT_SETTING_SIZE
},
6483 { set_le
, MGMT_SETTING_SIZE
},
6484 { set_dev_class
, MGMT_SET_DEV_CLASS_SIZE
},
6485 { set_local_name
, MGMT_SET_LOCAL_NAME_SIZE
},
6486 { add_uuid
, MGMT_ADD_UUID_SIZE
},
6487 { remove_uuid
, MGMT_REMOVE_UUID_SIZE
},
6488 { load_link_keys
, MGMT_LOAD_LINK_KEYS_SIZE
,
6490 { load_long_term_keys
, MGMT_LOAD_LONG_TERM_KEYS_SIZE
,
6492 { disconnect
, MGMT_DISCONNECT_SIZE
},
6493 { get_connections
, MGMT_GET_CONNECTIONS_SIZE
},
6494 { pin_code_reply
, MGMT_PIN_CODE_REPLY_SIZE
},
6495 { pin_code_neg_reply
, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
6496 { set_io_capability
, MGMT_SET_IO_CAPABILITY_SIZE
},
6497 { pair_device
, MGMT_PAIR_DEVICE_SIZE
},
6498 { cancel_pair_device
, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
6499 { unpair_device
, MGMT_UNPAIR_DEVICE_SIZE
},
6500 { user_confirm_reply
, MGMT_USER_CONFIRM_REPLY_SIZE
},
6501 { user_confirm_neg_reply
, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
6502 { user_passkey_reply
, MGMT_USER_PASSKEY_REPLY_SIZE
},
6503 { user_passkey_neg_reply
, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
6504 { read_local_oob_data
, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
6505 { add_remote_oob_data
, MGMT_ADD_REMOTE_OOB_DATA_SIZE
,
6507 { remove_remote_oob_data
, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
6508 { start_discovery
, MGMT_START_DISCOVERY_SIZE
},
6509 { stop_discovery
, MGMT_STOP_DISCOVERY_SIZE
},
6510 { confirm_name
, MGMT_CONFIRM_NAME_SIZE
},
6511 { block_device
, MGMT_BLOCK_DEVICE_SIZE
},
6512 { unblock_device
, MGMT_UNBLOCK_DEVICE_SIZE
},
6513 { set_device_id
, MGMT_SET_DEVICE_ID_SIZE
},
6514 { set_advertising
, MGMT_SETTING_SIZE
},
6515 { set_bredr
, MGMT_SETTING_SIZE
},
6516 { set_static_address
, MGMT_SET_STATIC_ADDRESS_SIZE
},
6517 { set_scan_params
, MGMT_SET_SCAN_PARAMS_SIZE
},
6518 { set_secure_conn
, MGMT_SETTING_SIZE
},
6519 { set_debug_keys
, MGMT_SETTING_SIZE
},
6520 { set_privacy
, MGMT_SET_PRIVACY_SIZE
},
6521 { load_irks
, MGMT_LOAD_IRKS_SIZE
,
6523 { get_conn_info
, MGMT_GET_CONN_INFO_SIZE
},
6524 { get_clock_info
, MGMT_GET_CLOCK_INFO_SIZE
},
6525 { add_device
, MGMT_ADD_DEVICE_SIZE
},
6526 { remove_device
, MGMT_REMOVE_DEVICE_SIZE
},
6527 { load_conn_param
, MGMT_LOAD_CONN_PARAM_SIZE
,
6529 { read_unconf_index_list
, MGMT_READ_UNCONF_INDEX_LIST_SIZE
,
6531 HCI_MGMT_UNTRUSTED
},
6532 { read_config_info
, MGMT_READ_CONFIG_INFO_SIZE
,
6533 HCI_MGMT_UNCONFIGURED
|
6534 HCI_MGMT_UNTRUSTED
},
6535 { set_external_config
, MGMT_SET_EXTERNAL_CONFIG_SIZE
,
6536 HCI_MGMT_UNCONFIGURED
},
6537 { set_public_address
, MGMT_SET_PUBLIC_ADDRESS_SIZE
,
6538 HCI_MGMT_UNCONFIGURED
},
6539 { start_service_discovery
, MGMT_START_SERVICE_DISCOVERY_SIZE
,
6541 { read_local_oob_ext_data
, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE
},
6542 { read_ext_index_list
, MGMT_READ_EXT_INDEX_LIST_SIZE
,
6544 HCI_MGMT_UNTRUSTED
},
6545 { read_adv_features
, MGMT_READ_ADV_FEATURES_SIZE
},
6546 { add_advertising
, MGMT_ADD_ADVERTISING_SIZE
,
6548 { remove_advertising
, MGMT_REMOVE_ADVERTISING_SIZE
},
6549 { get_adv_size_info
, MGMT_GET_ADV_SIZE_INFO_SIZE
},
6550 { start_limited_discovery
, MGMT_START_DISCOVERY_SIZE
},
6551 { read_ext_controller_info
,MGMT_READ_EXT_INFO_SIZE
,
6552 HCI_MGMT_UNTRUSTED
},
6553 { set_appearance
, MGMT_SET_APPEARANCE_SIZE
},
6556 void mgmt_index_added(struct hci_dev
*hdev
)
6558 struct mgmt_ev_ext_index ev
;
6560 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
6563 switch (hdev
->dev_type
) {
6565 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
6566 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
,
6567 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
6570 mgmt_index_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0,
6571 HCI_MGMT_INDEX_EVENTS
);
6584 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED
, hdev
, &ev
, sizeof(ev
),
6585 HCI_MGMT_EXT_INDEX_EVENTS
);
6588 void mgmt_index_removed(struct hci_dev
*hdev
)
6590 struct mgmt_ev_ext_index ev
;
6591 u8 status
= MGMT_STATUS_INVALID_INDEX
;
6593 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
6596 switch (hdev
->dev_type
) {
6598 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
6600 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
6601 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
,
6602 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
6605 mgmt_index_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0,
6606 HCI_MGMT_INDEX_EVENTS
);
6619 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED
, hdev
, &ev
, sizeof(ev
),
6620 HCI_MGMT_EXT_INDEX_EVENTS
);
6623 /* This function requires the caller holds hdev->lock */
6624 static void restart_le_actions(struct hci_dev
*hdev
)
6626 struct hci_conn_params
*p
;
6628 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
6629 /* Needed for AUTO_OFF case where might not "really"
6630 * have been powered off.
6632 list_del_init(&p
->action
);
6634 switch (p
->auto_connect
) {
6635 case HCI_AUTO_CONN_DIRECT
:
6636 case HCI_AUTO_CONN_ALWAYS
:
6637 list_add(&p
->action
, &hdev
->pend_le_conns
);
6639 case HCI_AUTO_CONN_REPORT
:
6640 list_add(&p
->action
, &hdev
->pend_le_reports
);
/* Notify management state after a power-on attempt.
 *
 * On success (err == 0) restores LE auto-connect actions and restarts
 * background scanning, then completes any pending Set Powered commands
 * and emits New Settings to the relevant sockets.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
/* Flush management state when the controller powers off.
 *
 * Completes pending Set Powered commands, fails all other pending commands
 * with an appropriate status, announces a zeroed class of device if one was
 * set, and emits New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the cleared class of device only if one had been set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
/* Fail a pending Set Powered command.
 *
 * Maps -ERFKILL to the dedicated RFKILLED status so userspace can tell
 * an rfkill block apart from a generic failure.
 */
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}
/* Emit a New Link Key event for a BR/EDR link key.
 *
 * store_hint tells userspace whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	/* Link keys are BR/EDR only */
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
/* Map an SMP LTK to the management-interface LTK type value, taking the
 * key's authentication level into account.
 *
 * NOTE(review): the non-P256 case labels were lost in extraction of this
 * chunk; the grouping below follows the upstream file — confirm against it.
 */
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	/* Unknown key types are reported as unauthenticated */
	return MGMT_LTK_UNAUTHENTICATED;
}
/* Emit a New Long Term Key event for an SMP LTK.
 *
 * The store hint is forced to zero for peers using (non-static) random
 * addresses without an IRK, since such keys cannot be looked up again.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Mark master (initiator-distributed legacy) keys as such */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
/* Emit a New Identity Resolving Key event, carrying both the current RPA
 * and the peer's identity address plus IRK value.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
/* Emit MGMT_EV_NEW_CSRK for a new Connection Signature Resolving Key.
 * Non-identity random addresses (b[5] top bits != 0xc0) get a zero
 * store_hint so userspace does not persist the key.
 * NOTE(review): the trailing parameter of the signature (presumably
 * the persistent flag, per the sibling mgmt_new_irk/mgmt_new_ltk) is
 * elided from this mangled extract — confirm against the original.
 */
6819 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
6822 struct mgmt_ev_new_csrk ev
;
6824 memset(&ev
, 0, sizeof(ev
));
6826 /* Devices using resolvable or non-resolvable random addresses
6827 * without providing an identity resolving key don't require
6828 * to store signature resolving keys. Their addresses will change
6829 * the next time around.
6831 * Only when a remote device provides an identity address
6832 * make sure the signature resolving key is stored. So allow
6833 * static random and public addresses here.
6835 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
6836 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
6837 ev
.store_hint
= 0x00;
6839 ev
.store_hint
= persistent
;
6841 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
6842 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
6843 ev
.key
.type
= csrk
->type
;
6844 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
6846 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
/* Emit MGMT_EV_NEW_CONN_PARAM with suggested LE connection parameters.
 * Bails out early for non-identity addresses; all u16 parameters are
 * converted to little-endian wire format before sending.
 * NOTE(review): line-mangled extract; code left byte-identical.
 */
6849 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6850 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
6851 u16 max_interval
, u16 latency
, u16 timeout
)
6853 struct mgmt_ev_new_conn_param ev
;
6855 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
6858 memset(&ev
, 0, sizeof(ev
));
6859 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6860 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
6861 ev
.store_hint
= store_hint
;
6862 ev
.min_interval
= cpu_to_le16(min_interval
);
6863 ev
.max_interval
= cpu_to_le16(max_interval
);
6864 ev
.latency
= cpu_to_le16(latency
);
6865 ev
.timeout
= cpu_to_le16(timeout
);
6867 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
/* Emit MGMT_EV_DEVICE_CONNECTED for a new connection. The event's EIR
 * payload carries either the LE advertising data (if any) or, for
 * BR/EDR, the remote name plus a class-of-device field.
 * NOTE(review): the declaration of 'buf' and 'eir_len' (original lines
 * 6872-6876) is elided from this mangled extract; code left
 * byte-identical.
 */
6870 void mgmt_device_connected(struct hci_dev
*hdev
, struct hci_conn
*conn
,
6871 u32 flags
, u8
*name
, u8 name_len
)
6874 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
6877 bacpy(&ev
->addr
.bdaddr
, &conn
->dst
);
6878 ev
->addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
6880 ev
->flags
= __cpu_to_le32(flags
);
6882 /* We must ensure that the EIR Data fields are ordered and
6883 * unique. Keep it simple for now and avoid the problem by not
6884 * adding any BR/EDR data to the LE adv.
6886 if (conn
->le_adv_data_len
> 0) {
6887 memcpy(&ev
->eir
[eir_len
],
6888 conn
->le_adv_data
, conn
->le_adv_data_len
);
6889 eir_len
= conn
->le_adv_data_len
;
6892 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
/* Append CoD only when the stored dev_class is non-zero. */
6895 if (memcmp(conn
->dev_class
, "\0\0\0", 3) != 0)
6896 eir_len
= eir_append_data(ev
->eir
, eir_len
,
6898 conn
->dev_class
, 3);
6901 ev
->eir_len
= cpu_to_le16(eir_len
);
6903 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
6904 sizeof(*ev
) + eir_len
, NULL
);
/* mgmt_pending_foreach() callback: complete a pending MGMT_OP_DISCONNECT
 * with status 0 and remove it. @data points to a struct sock * slot;
 * the lines that capture cmd->sk into it (orig. 6912-6915) are elided
 * from this extract.
 */
6907 static void disconnect_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
6909 struct sock
**sk
= data
;
6911 cmd
->cmd_complete(cmd
, 0);
6916 mgmt_pending_remove(cmd
);
/* mgmt_pending_foreach() callback for MGMT_OP_UNPAIR_DEVICE: emits the
 * device-unpaired event for the command's address, completes the pending
 * command with status 0, and removes it.
 */
6919 static void unpair_device_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
6921 struct hci_dev
*hdev
= data
;
6922 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
6924 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
6926 cmd
->cmd_complete(cmd
, 0);
6927 mgmt_pending_remove(cmd
);
/* Report whether a Set Powered (off) command is currently pending for
 * @hdev. Looks up a pending MGMT_OP_SET_POWERED command; the remainder
 * of the body (orig. 6936-6944: NULL check and inspection of cp->val)
 * is elided from this mangled extract — confirm against the original.
 */
6930 bool mgmt_powering_down(struct hci_dev
*hdev
)
6932 struct mgmt_pending_cmd
*cmd
;
6933 struct mgmt_mode
*cp
;
6935 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
/* Emit MGMT_EV_DEVICE_DISCONNECTED when a link goes down. If this was
 * the last connection while powering down, the power-off work is
 * fast-tracked onto req_workqueue. Pending DISCONNECT commands are
 * completed first so the event's socket can be skipped; pending
 * UNPAIR_DEVICE commands are then flushed via unpair_device_rsp.
 * NOTE(review): line-mangled extract with several lines elided; code
 * left byte-identical.
 */
6946 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6947 u8 link_type
, u8 addr_type
, u8 reason
,
6948 bool mgmt_connected
)
6950 struct mgmt_ev_device_disconnected ev
;
6951 struct sock
*sk
= NULL
;
6953 /* The connection is still in hci_conn_hash so test for 1
6954 * instead of 0 to know if this is the last one.
6956 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
6957 cancel_delayed_work(&hdev
->power_off
);
6958 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
6961 if (!mgmt_connected
)
6964 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
6967 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
6969 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6970 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6973 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
6978 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
/* Handle a failed disconnect attempt: flush pending UNPAIR_DEVICE
 * commands, then complete the matching pending MGMT_OP_DISCONNECT
 * command (same address and address type) with the mapped status.
 * NOTE(review): NULL-check of cmd and assignment of cp (orig.
 * 6993-6996) are elided from this extract; code left byte-identical.
 */
6982 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6983 u8 link_type
, u8 addr_type
, u8 status
)
6985 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
6986 struct mgmt_cp_disconnect
*cp
;
6987 struct mgmt_pending_cmd
*cmd
;
6989 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
6992 cmd
= pending_find(MGMT_OP_DISCONNECT
, hdev
);
6998 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
7001 if (cp
->addr
.type
!= bdaddr_type
)
7004 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7005 mgmt_pending_remove(cmd
);
/* Emit MGMT_EV_CONNECT_FAILED with the mapped HCI status. As in
 * mgmt_device_disconnected(), a power-down in progress with only this
 * connection left gets its power-off work queued immediately.
 */
7008 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
7009 u8 addr_type
, u8 status
)
7011 struct mgmt_ev_connect_failed ev
;
7013 /* The connection is still in hci_conn_hash so test for 1
7014 * instead of 0 to know if this is the last one.
7016 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
7017 cancel_delayed_work(&hdev
->power_off
);
7018 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
7021 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7022 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7023 ev
.status
= mgmt_status(status
);
7025 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
/* Emit MGMT_EV_PIN_CODE_REQUEST for a BR/EDR PIN request. The line
 * that copies @secure into the event (orig. 7034) is elided from this
 * extract; code left byte-identical.
 */
7028 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
7030 struct mgmt_ev_pin_code_request ev
;
7032 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7033 ev
.addr
.type
= BDADDR_BREDR
;
7036 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
/* Complete a pending MGMT_OP_PIN_CODE_REPLY command with the mapped
 * HCI status. Remaining parameters and the cmd NULL-check are elided
 * from this mangled extract.
 */
7039 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7042 struct mgmt_pending_cmd
*cmd
;
7044 cmd
= pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
7048 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7049 mgmt_pending_remove(cmd
);
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY command with the
 * mapped HCI status. Mirrors mgmt_pin_code_reply_complete(); the cmd
 * NULL-check line is elided from this extract.
 */
7052 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7055 struct mgmt_pending_cmd
*cmd
;
7057 cmd
= pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
7061 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7062 mgmt_pending_remove(cmd
);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm a
 * numeric comparison @value. Returns the mgmt_event() result; the tail
 * of the return statement (socket argument) is elided from this
 * extract.
 */
7065 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7066 u8 link_type
, u8 addr_type
, u32 value
,
7069 struct mgmt_ev_user_confirm_request ev
;
7071 BT_DBG("%s", hdev
->name
);
7073 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7074 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7075 ev
.confirm_hint
= confirm_hint
;
7076 ev
.value
= cpu_to_le32(value
);
7078 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to enter a
 * passkey for the given address. Returns the mgmt_event() result; the
 * final argument of the return statement is elided from this extract.
 */
7082 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7083 u8 link_type
, u8 addr_type
)
7085 struct mgmt_ev_user_passkey_request ev
;
7087 BT_DBG("%s", hdev
->name
);
7089 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7090 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7092 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
/* Shared completion for the four user confirm/passkey (neg-)reply
 * paths: find the pending command for @opcode, complete it with the
 * mapped status, and remove it. The NULL-check and return lines are
 * elided from this mangled extract.
 */
7096 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7097 u8 link_type
, u8 addr_type
, u8 status
,
7100 struct mgmt_pending_cmd
*cmd
;
7102 cmd
= pending_find(opcode
, hdev
);
7106 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7107 mgmt_pending_remove(cmd
);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_REPLY. */
7112 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7113 u8 link_type
, u8 addr_type
, u8 status
)
7115 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7116 status
, MGMT_OP_USER_CONFIRM_REPLY
);
/* Thin wrapper: complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY.
 * (The status argument line is elided in this mangled extract.) */
7119 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7120 u8 link_type
, u8 addr_type
, u8 status
)
7122 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7124 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_REPLY. */
7127 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7128 u8 link_type
, u8 addr_type
, u8 status
)
7130 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7131 status
, MGMT_OP_USER_PASSKEY_REPLY
);
/* Thin wrapper: complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY.
 * (The status argument line is elided in this mangled extract.) */
7134 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7135 u8 link_type
, u8 addr_type
, u8 status
)
7137 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7139 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display @passkey during
 * pairing; 'entered' tracks digits typed so far. Returns the
 * mgmt_event() result. The 'entered' parameter declaration is elided
 * from this mangled extract.
 */
7142 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7143 u8 link_type
, u8 addr_type
, u32 passkey
,
7146 struct mgmt_ev_passkey_notify ev
;
7148 BT_DBG("%s", hdev
->name
);
7150 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7151 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7152 ev
.passkey
= __cpu_to_le32(passkey
);
7153 ev
.entered
= entered
;
7155 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
7158 void mgmt_auth_failed(struct hci_conn
*conn
, u8 hci_status
)
7160 struct mgmt_ev_auth_failed ev
;
7161 struct mgmt_pending_cmd
*cmd
;
7162 u8 status
= mgmt_status(hci_status
);
7164 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
7165 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
7168 cmd
= find_pairing(conn
);
7170 mgmt_event(MGMT_EV_AUTH_FAILED
, conn
->hdev
, &ev
, sizeof(ev
),
7171 cmd
? cmd
->sk
: NULL
);
7174 cmd
->cmd_complete(cmd
, status
);
7175 mgmt_pending_remove(cmd
);
/* Handle completion of enabling/disabling link-level authentication.
 * On failure, all pending SET_LINK_SECURITY commands get the mapped
 * error status; on success, HCI_LINK_SECURITY is synced to the HCI_AUTH
 * flag, pending commands receive the new settings, and new_settings()
 * is broadcast if anything changed. Several control-flow lines (error
 * branch, 'changed' check) are elided from this mangled extract.
 */
7179 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
7181 struct cmd_lookup match
= { NULL
, hdev
};
7185 u8 mgmt_err
= mgmt_status(status
);
7186 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
7187 cmd_status_rsp
, &mgmt_err
);
7191 if (test_bit(HCI_AUTH
, &hdev
->flags
))
7192 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_LINK_SECURITY
);
7194 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_LINK_SECURITY
);
7196 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
7200 new_settings(hdev
, match
.sk
);
/* Queue an HCI Write Extended Inquiry Response command that zeroes the
 * controller's EIR data, and clear the cached copy in hdev->eir.
 * No-op if the controller lacks extended inquiry support.
 */
7206 static void clear_eir(struct hci_request
*req
)
7208 struct hci_dev
*hdev
= req
->hdev
;
7209 struct hci_cp_write_eir cp
;
7211 if (!lmp_ext_inq_capable(hdev
))
7214 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
7216 memset(&cp
, 0, sizeof(cp
));
7218 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
/* Handle completion of the Write Simple Pairing Mode command. On error
 * the SSP (and dependent HS) flags are rolled back and pending SET_SSP
 * commands get the error status; on success the HCI_SSP_ENABLED flag is
 * synced, pending commands get the new settings, and the EIR / SSP
 * debug-mode state is refreshed via a new hci_request. Several branch
 * and flag-name lines are elided from this mangled extract — code left
 * byte-identical.
 */
7221 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
7223 struct cmd_lookup match
= { NULL
, hdev
};
7224 struct hci_request req
;
7225 bool changed
= false;
7228 u8 mgmt_err
= mgmt_status(status
);
7230 if (enable
&& hci_dev_test_and_clear_flag(hdev
,
7232 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
7233 new_settings(hdev
, NULL
);
7236 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
7242 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_SSP_ENABLED
);
7244 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_SSP_ENABLED
);
7246 changed
= hci_dev_test_and_clear_flag(hdev
,
7249 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
7252 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
7255 new_settings(hdev
, match
.sk
);
7260 hci_req_init(&req
, hdev
);
7262 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
7263 if (hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
7264 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
7265 sizeof(enable
), &enable
);
7266 __hci_req_update_eir(&req
);
7271 hci_req_run(&req
, NULL
);
/* mgmt_pending_foreach() callback: record the first pending command's
 * socket in the cmd_lookup, taking a reference on it so the caller can
 * use it after iteration.
 */
7274 static void sk_lookup(struct mgmt_pending_cmd
*cmd
, void *data
)
7276 struct cmd_lookup
*match
= data
;
7278 if (match
->sk
== NULL
) {
7279 match
->sk
= cmd
->sk
;
7280 sock_hold(match
->sk
);
/* Handle completion of a class-of-device update: pick up the socket of
 * any pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command (so it can be
 * skipped when broadcasting), then emit MGMT_EV_CLASS_OF_DEV_CHANGED
 * with the 3-byte class and refresh extended info. The status parameter
 * line and the success check are elided from this mangled extract.
 */
7284 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
7287 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
7289 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
7290 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
7291 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
7294 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
,
7295 3, HCI_MGMT_DEV_CLASS_EVENTS
, NULL
);
7296 ext_info_changed(hdev
, NULL
);
/* Handle completion of a local-name change: cache the new name in
 * hdev->dev_name and broadcast MGMT_EV_LOCAL_NAME_CHANGED (skipping the
 * initiating socket, if any) unless the change is part of power-on.
 * Some guard lines (status check, pending-cmd branch) are elided from
 * this mangled extract.
 */
7303 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
7305 struct mgmt_cp_set_local_name ev
;
7306 struct mgmt_pending_cmd
*cmd
;
7311 memset(&ev
, 0, sizeof(ev
));
7312 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
7313 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
7315 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
7317 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
7319 /* If this is a HCI command related to powering on the
7320 * HCI dev don't send any mgmt signals.
7322 if (pending_find(MGMT_OP_SET_POWERED
, hdev
))
7326 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
7327 HCI_MGMT_LOCAL_NAME_EVENTS
, cmd
? cmd
->sk
: NULL
);
7328 ext_info_changed(hdev
, cmd
? cmd
->sk
: NULL
);
/* Linear search: return whether the 16-byte @uuid appears in the
 * @uuids array of @uuid_count entries. The loop-variable declaration
 * and return statements are elided from this mangled extract.
 */
7331 static inline bool has_uuid(u8
*uuid
, u16 uuid_count
, u8 (*uuids
)[16])
7335 for (i
= 0; i
< uuid_count
; i
++) {
7336 if (!memcmp(uuid
, uuids
[i
], 16))
/* Walk the EIR/advertising data in @eir (length @eir_len) and return
 * whether any advertised service UUID matches an entry in @uuids.
 * 16- and 32-bit UUIDs are expanded into full 128-bit form using the
 * Bluetooth base UUID before comparison (little-endian bytes mapped
 * into positions 12-15). Bounds check on each field prevents reading
 * past the buffer. The switch header, some case bodies and the 'uuid'
 * scratch declaration are elided from this mangled extract — code left
 * byte-identical.
 */
7343 static bool eir_has_uuids(u8
*eir
, u16 eir_len
, u16 uuid_count
, u8 (*uuids
)[16])
7347 while (parsed
< eir_len
) {
7348 u8 field_len
= eir
[0];
7355 if (eir_len
- parsed
< field_len
+ 1)
7359 case EIR_UUID16_ALL
:
7360 case EIR_UUID16_SOME
:
7361 for (i
= 0; i
+ 3 <= field_len
; i
+= 2) {
7362 memcpy(uuid
, bluetooth_base_uuid
, 16);
7363 uuid
[13] = eir
[i
+ 3];
7364 uuid
[12] = eir
[i
+ 2];
7365 if (has_uuid(uuid
, uuid_count
, uuids
))
7369 case EIR_UUID32_ALL
:
7370 case EIR_UUID32_SOME
:
7371 for (i
= 0; i
+ 5 <= field_len
; i
+= 4) {
7372 memcpy(uuid
, bluetooth_base_uuid
, 16);
7373 uuid
[15] = eir
[i
+ 5];
7374 uuid
[14] = eir
[i
+ 4];
7375 uuid
[13] = eir
[i
+ 3];
7376 uuid
[12] = eir
[i
+ 2];
7377 if (has_uuid(uuid
, uuid_count
, uuids
))
7381 case EIR_UUID128_ALL
:
7382 case EIR_UUID128_SOME
:
7383 for (i
= 0; i
+ 17 <= field_len
; i
+= 16) {
7384 memcpy(uuid
, eir
+ i
+ 2, 16);
7385 if (has_uuid(uuid
, uuid_count
, uuids
))
7391 parsed
+= field_len
+ 1;
7392 eir
+= field_len
+ 1;
/* Schedule a delayed LE scan restart, unless the controller is not
 * scanning or the current scan window would already have ended by the
 * time the restart fires (time_after check against scan_start +
 * scan_duration).
 */
7398 static void restart_le_scan(struct hci_dev
*hdev
)
7400 /* If controller is not scanning we are done. */
7401 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
7404 if (time_after(jiffies
+ DISCOV_LE_RESTART_DELAY
,
7405 hdev
->discovery
.scan_start
+
7406 hdev
->discovery
.scan_duration
))
7409 queue_delayed_work(hdev
->req_workqueue
, &hdev
->le_scan_restart
,
7410 DISCOV_LE_RESTART_DELAY
);
/* Service-discovery filter: decide whether a found device passes the
 * configured RSSI threshold and UUID list. With the strict-duplicate-
 * filter quirk set, low-RSSI results trigger a scan restart and are
 * re-checked against the threshold. Return statements are elided from
 * this mangled extract — code left byte-identical.
 */
7413 static bool is_filter_match(struct hci_dev
*hdev
, s8 rssi
, u8
*eir
,
7414 u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
7416 /* If a RSSI threshold has been specified, and
7417 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7418 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7419 * is set, let it through for further processing, as we might need to
7422 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7423 * the results are also dropped.
7425 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
7426 (rssi
== HCI_RSSI_INVALID
||
7427 (rssi
< hdev
->discovery
.rssi
&&
7428 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
))))
7431 if (hdev
->discovery
.uuid_count
!= 0) {
7432 /* If a list of UUIDs is provided in filter, results with no
7433 * matching UUID should be dropped.
7435 if (!eir_has_uuids(eir
, eir_len
, hdev
->discovery
.uuid_count
,
7436 hdev
->discovery
.uuids
) &&
7437 !eir_has_uuids(scan_rsp
, scan_rsp_len
,
7438 hdev
->discovery
.uuid_count
,
7439 hdev
->discovery
.uuids
))
7443 /* If duplicate filtering does not report RSSI changes, then restart
7444 * scanning to ensure updated result with updated RSSI values.
7446 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
)) {
7447 restart_le_scan(hdev
);
7449 /* Validate RSSI value against the RSSI threshold once more. */
7450 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
7451 rssi
< hdev
->discovery
.rssi
)
/* Build and emit MGMT_EV_DEVICE_FOUND for a discovered device. Drops
 * results when no kernel-initiated discovery is active (except LE
 * passive-scan reports), applies the service-discovery filter and the
 * limited-discoverable filter, then packs EIR data, an optional
 * class-of-device field, and any scan response into one event buffer.
 * NOTE(review): the declarations of 'buf', 'ev_size' and 'eir_len', the
 * early returns, and an else keyword are elided from this mangled
 * extract — code left byte-identical.
 */
7458 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
7459 u8 addr_type
, u8
*dev_class
, s8 rssi
, u32 flags
,
7460 u8
*eir
, u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
7463 struct mgmt_ev_device_found
*ev
= (void *)buf
;
7466 /* Don't send events for a non-kernel initiated discovery. With
7467 * LE one exception is if we have pend_le_reports > 0 in which
7468 * case we're doing passive scanning and want these events.
7470 if (!hci_discovery_active(hdev
)) {
7471 if (link_type
== ACL_LINK
)
7473 if (link_type
== LE_LINK
&& list_empty(&hdev
->pend_le_reports
))
7477 if (hdev
->discovery
.result_filtering
) {
7478 /* We are using service discovery */
7479 if (!is_filter_match(hdev
, rssi
, eir
, eir_len
, scan_rsp
,
7484 if (hdev
->discovery
.limited
) {
7485 /* Check for limited discoverable bit */
7487 if (!(dev_class
[1] & 0x20))
7490 u8
*flags
= eir_get_data(eir
, eir_len
, EIR_FLAGS
, NULL
);
7491 if (!flags
|| !(flags
[0] & LE_AD_LIMITED
))
7496 /* Make sure that the buffer is big enough. The 5 extra bytes
7497 * are for the potential CoD field.
7499 if (sizeof(*ev
) + eir_len
+ scan_rsp_len
+ 5 > sizeof(buf
))
7502 memset(buf
, 0, sizeof(buf
));
7504 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7505 * RSSI value was reported as 0 when not available. This behavior
7506 * is kept when using device discovery. This is required for full
7507 * backwards compatibility with the API.
7509 * However when using service discovery, the value 127 will be
7510 * returned when the RSSI is not available.
7512 if (rssi
== HCI_RSSI_INVALID
&& !hdev
->discovery
.report_invalid_rssi
&&
7513 link_type
== ACL_LINK
)
7516 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
7517 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7519 ev
->flags
= cpu_to_le32(flags
);
7522 /* Copy EIR or advertising data into event */
7523 memcpy(ev
->eir
, eir
, eir_len
);
7525 if (dev_class
&& !eir_get_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
7527 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
7530 if (scan_rsp_len
> 0)
7531 /* Append scan response data to event */
7532 memcpy(ev
->eir
+ eir_len
, scan_rsp
, scan_rsp_len
);
7534 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
7535 ev_size
= sizeof(*ev
) + eir_len
+ scan_rsp_len
;
7537 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
/* Emit MGMT_EV_DEVICE_FOUND carrying only the remote device's name as
 * an EIR_NAME_COMPLETE field (used when the name arrives separately
 * from discovery results). Buffer is sized for sizeof(*ev) plus the
 * maximum name length and a 2-byte EIR header. Some lines (rssi/flags
 * assignment, name_len argument) are elided from this mangled extract.
 */
7540 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
7541 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
7543 struct mgmt_ev_device_found
*ev
;
7544 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
7547 ev
= (struct mgmt_ev_device_found
*) buf
;
7549 memset(buf
, 0, sizeof(buf
));
7551 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
7552 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7555 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
7558 ev
->eir_len
= cpu_to_le16(eir_len
);
7560 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
/* Emit MGMT_EV_DISCOVERING to announce that discovery of the current
 * hdev->discovery.type has started (@discovering != 0) or stopped.
 */
7563 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
7565 struct mgmt_ev_discovering ev
;
7567 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
7569 memset(&ev
, 0, sizeof(ev
));
7570 ev
.type
= hdev
->discovery
.type
;
7571 ev
.discovering
= discovering
;
7573 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
/* Registration descriptor for the management control channel: routes
 * HCI_CHANNEL_CONTROL traffic to the mgmt_handlers table and runs
 * mgmt_init_hdev for each new controller. Registered/unregistered by
 * mgmt_init()/mgmt_exit(); the closing brace line is elided from this
 * mangled extract.
 */
7576 static struct hci_mgmt_chan chan
= {
7577 .channel
= HCI_CHANNEL_CONTROL
,
7578 .handler_count
= ARRAY_SIZE(mgmt_handlers
),
7579 .handlers
= mgmt_handlers
,
7580 .hdev_init
= mgmt_init_hdev
,
7585 return hci_mgmt_chan_register(&chan
);
/* Module teardown: unregister the management control channel. */
7588 void mgmt_exit(void)
7590 hci_mgmt_chan_unregister(&chan
);